diff --git a/.circleci/config.yml b/.circleci/config.yml index 1d90b871..138cfbc6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,7 +15,7 @@ workflows: python_version: ["3.8", "3.9", "3.10", "3.11"] # "3.12" arangodb_config: ["single", "cluster"] arangodb_license: ["community", "enterprise"] - arangodb_version: ["3.10.10", "3.11.4", "latest"] + arangodb_version: ["3.11", "latest"] jobs: lint: diff --git a/README.md b/README.md index ac3bf680..9526ddba 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,15 @@ ![Logo](https://user-images.githubusercontent.com/2701938/108583516-c3576680-72ee-11eb-883f-2d9b52e74e45.png) -[![CircleCI](https://dl.circleci.com/status-badge/img/gh/ArangoDB-Community/python-arango/tree/main.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/ArangoDB-Community/python-arango/tree/main) -[![CodeQL](https://github.com/ArangoDB-Community/python-arango/actions/workflows/codeql.yaml/badge.svg)](https://github.com/ArangoDB-Community/python-arango/actions/workflows/codeql.yaml) -[![Docs](https://github.com/ArangoDB-Community/python-arango/actions/workflows/docs.yaml/badge.svg)](https://github.com/ArangoDB-Community/python-arango/actions/workflows/docs.yaml) -[![Coverage Status](https://codecov.io/gh/ArangoDB-Community/python-arango/branch/main/graph/badge.svg?token=M8zrjrzsUY)](https://codecov.io/gh/ArangoDB-Community/python-arango) -[![Last commit](https://img.shields.io/github/last-commit/ArangoDB-Community/python-arango)](https://github.com/ArangoDB-Community/python-arango/commits/master) +[![CircleCI](https://dl.circleci.com/status-badge/img/gh/arangodb/python-arango/tree/main.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/arangodb/python-arango/tree/main) +[![CodeQL](https://github.com/arangodb/python-arango/actions/workflows/codeql.yaml/badge.svg)](https://github.com/arangodb/python-arango/actions/workflows/codeql.yaml) 
+[![Docs](https://github.com/arangodb/python-arango/actions/workflows/docs.yaml/badge.svg)](https://github.com/arangodb/python-arango/actions/workflows/docs.yaml) +[![Coverage Status](https://codecov.io/gh/arangodb/python-arango/branch/main/graph/badge.svg?token=M8zrjrzsUY)](https://codecov.io/gh/arangodb/python-arango) +[![Last commit](https://img.shields.io/github/last-commit/arangodb/python-arango)](https://github.com/arangodb/python-arango/commits/master) [![PyPI version badge](https://img.shields.io/pypi/v/python-arango?color=3775A9&style=for-the-badge&logo=pypi&logoColor=FFD43B)](https://pypi.org/project/python-arango/) [![Python versions badge](https://img.shields.io/badge/3.8%2B-3776AB?style=for-the-badge&logo=python&logoColor=FFD43B&label=Python)](https://pypi.org/project/python-arango/) -[![License](https://img.shields.io/github/license/ArangoDB-Community/python-arango?color=9E2165&style=for-the-badge)](https://github.com/ArangoDB-Community/python-arango/blob/master/LICENSE) +[![License](https://img.shields.io/github/license/arangodb/python-arango?color=9E2165&style=for-the-badge)](https://github.com/arangodb/python-arango/blob/master/LICENSE) [![Code style: black](https://img.shields.io/static/v1?style=for-the-badge&label=code%20style&message=black&color=black)](https://github.com/psf/black) [![Downloads](https://img.shields.io/pepy/dt/python-arango?style=for-the-badge&color=282661 )](https://pepy.tech/project/python-arango) @@ -21,7 +21,7 @@ database natively supporting documents, graphs and search. 
## Requirements -- ArangoDB version 3.9+ +- ArangoDB version 3.11+ - Python version 3.8+ ## Installation diff --git a/arango/client.py b/arango/client.py index 1666982e..2b6e4993 100644 --- a/arango/client.py +++ b/arango/client.py @@ -13,7 +13,12 @@ ) from arango.database import StandardDatabase from arango.exceptions import ServerConnectionError -from arango.http import DEFAULT_REQUEST_TIMEOUT, DefaultHTTPClient, HTTPClient +from arango.http import ( + DEFAULT_REQUEST_TIMEOUT, + DefaultHTTPClient, + HTTPClient, + RequestCompression, +) from arango.resolver import ( FallbackHostResolver, HostResolver, @@ -33,7 +38,7 @@ def default_serializer(x: Any) -> str: :return: The object serialized as a JSON string :rtype: str """ - return dumps(x) + return dumps(x, separators=(",", ":")) def default_deserializer(x: str) -> Any: @@ -85,6 +90,12 @@ class ArangoClient: None: No timeout. int: Timeout value in seconds. :type request_timeout: int | float + :param request_compression: Will compress requests to the server according to + the given algorithm. No compression happens by default. + :type request_compression: arango.http.RequestCompression | None + :param response_compression: Tells the server what compression algorithm is + acceptable for the response. No compression happens by default. 
+ :type response_compression: str | None """ def __init__( @@ -97,6 +108,8 @@ def __init__( deserializer: Callable[[str], Any] = default_deserializer, verify_override: Union[bool, str, None] = None, request_timeout: Union[int, float, None] = DEFAULT_REQUEST_TIMEOUT, + request_compression: Optional[RequestCompression] = None, + response_compression: Optional[str] = None, ) -> None: if isinstance(hosts, str): self._hosts = [host.strip("/") for host in hosts.split(",")] @@ -133,6 +146,9 @@ def __init__( for session in self._sessions: session.verify = verify_override + self._request_compression = request_compression + self._response_compression = response_compression + def __repr__(self) -> str: return f"" @@ -231,6 +247,8 @@ def db( serializer=self._serializer, deserializer=self._deserializer, superuser_token=superuser_token, + request_compression=self._request_compression, + response_compression=self._response_compression, ) elif user_token is not None: connection = JwtConnection( @@ -242,6 +260,8 @@ def db( serializer=self._serializer, deserializer=self._deserializer, user_token=user_token, + request_compression=self._request_compression, + response_compression=self._response_compression, ) elif auth_method.lower() == "basic": connection = BasicConnection( @@ -254,6 +274,8 @@ def db( http_client=self._http, serializer=self._serializer, deserializer=self._deserializer, + request_compression=self._request_compression, + response_compression=self._response_compression, ) elif auth_method.lower() == "jwt": connection = JwtConnection( @@ -266,6 +288,8 @@ def db( http_client=self._http, serializer=self._serializer, deserializer=self._deserializer, + request_compression=self._request_compression, + response_compression=self._response_compression, ) else: raise ValueError(f"invalid auth_method: {auth_method}") diff --git a/arango/cluster.py b/arango/cluster.py index a272f50c..dffaeb04 100644 --- a/arango/cluster.py +++ b/arango/cluster.py @@ -11,6 +11,7 @@ 
ClusterServerCountError, ClusterServerEngineError, ClusterServerIDError, + ClusterServerModeError, ClusterServerRoleError, ClusterServerStatisticsError, ClusterServerVersionError, @@ -57,6 +58,27 @@ def response_handler(resp: Response) -> str: return self._execute(request, response_handler) + def server_mode(self) -> Result[str]: + """Return the server mode. + + In a read-only server, all write operations will fail + with an error code of 1004 (ERROR_READ_ONLY). Creating or dropping + databases and collections will also fail with error code 11 (ERROR_FORBIDDEN). + + :return: Server mode. Possible values are "default" or "readonly". + :rtype: str + :raise arango.exceptions.ClusterServerModeError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_admin/server/mode") + + def response_handler(resp: Response) -> str: + if resp.is_success: + return str(resp.body["mode"]) + + raise ClusterServerModeError(resp, request) + + return self._execute(request, response_handler) + def server_version(self, server_id: str) -> Result[Json]: """Return the version of the given server. @@ -140,6 +162,58 @@ def response_handler(resp: Response) -> Json: return self._execute(request, response_handler) + def server_maintenance_mode(self, server_id: str) -> Result[Json]: + """Return the maintenance status for the given server. + + :param server_id: Server ID. + :type server_id: str + :return: Maintenance status for the given server. + :rtype: dict + :raise arango.exceptions.ClusterMaintenanceModeError: If retrieval fails. 
+ """ + request = Request( + method="get", + endpoint=f"/_admin/cluster/maintenance/{server_id}", + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + result: Json = resp.body.get("result", {}) + return result + + raise ClusterMaintenanceModeError(resp, request) + + return self._execute(request, response_handler) + + def toggle_server_maintenance_mode( + self, server_id: str, mode: str, timeout: Optional[int] = None + ) -> Result[Json]: + """Enable or disable the maintenance mode for the given server. + + :param server_id: Server ID. + :type server_id: str + :param mode: Maintenance mode. Allowed values are "normal" and "maintenance". + :type mode: str + :param timeout: Timeout in seconds. + :type timeout: Optional[int] + :return: Result of the operation. + :rtype: dict + :raise arango.exceptions.ClusterMaintenanceModeError: If toggle fails. + """ + request = Request( + method="put", + endpoint=f"/_admin/cluster/maintenance/{server_id}", + data={"mode": mode, "timeout": timeout}, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + + raise ClusterMaintenanceModeError(resp, request) + + return self._execute(request, response_handler) + def health(self) -> Result[Json]: """Return the cluster health. diff --git a/arango/collection.py b/arango/collection.py index 930ef771..181ac001 100644 --- a/arango/collection.py +++ b/arango/collection.py @@ -1373,7 +1373,7 @@ def add_skiplist_index( def add_geo_index( self, fields: Fields, - ordered: Optional[bool] = None, + geo_json: Optional[bool] = None, name: Optional[str] = None, in_background: Optional[bool] = None, legacyPolygons: Optional[bool] = False, @@ -1385,8 +1385,10 @@ def add_geo_index( with at least two floats. Documents with missing fields or invalid values are excluded. :type fields: str | [str] - :param ordered: Whether the order is longitude, then latitude. 
- :type ordered: bool | None + :param geo_json: Whether to use GeoJSON data-format or not. This + parameter has been renamed from `ordered`. See Github Issue + #234 for more details. + :type geo_json: bool | None :param name: Optional name for the index. :type name: str | None :param in_background: Do not hold the collection lock. @@ -1400,8 +1402,8 @@ def add_geo_index( """ data: Json = {"type": "geo", "fields": fields} - if ordered is not None: - data["geoJson"] = ordered + if geo_json is not None: + data["geoJson"] = geo_json if name is not None: data["name"] = name if in_background is not None: @@ -1617,6 +1619,89 @@ def add_inverted_index( return self._add_index(data) + def add_zkd_index( + self, + fields: Sequence[str], + field_value_types: str = "double", + name: Optional[str] = None, + unique: Optional[bool] = None, + in_background: Optional[bool] = None, + ) -> Result[Json]: + """Create a new ZKD Index. + + :param fields: Document fields to index. Unlike for other indexes the + order of the fields does not matter. + :type fields: Sequence[str] + :param field_value_types: The type of the field values. The only allowed + value is "double" at the moment. Defaults to "double". + :type field_value_types: str + :param name: Optional name for the index. + :type name: str | None + :param unique: Whether the index is unique. + :type unique: bool | None + :param in_background: Do not hold the collection lock. + :type in_background: bool | None + :return: New index details. + :rtype: dict + :raise arango.exceptions.IndexCreateError: If create fails. 
+ """ + data: Json = { + "type": "zkd", + "fields": fields, + "fieldValueTypes": field_value_types, + } + + if unique is not None: + data["unique"] = unique + if name is not None: + data["name"] = name + if in_background is not None: + data["inBackground"] = in_background + + return self._add_index(data) + + def add_mdi_index( + self, + fields: Sequence[str], + field_value_types: str = "double", + name: Optional[str] = None, + unique: Optional[bool] = None, + in_background: Optional[bool] = None, + ) -> Result[Json]: + """Create a new MDI index, previously known as ZKD index. This method + is only usable with ArangoDB 3.12 and later. + + :param fields: Document fields to index. Unlike for other indexes the + order of the fields does not matter. + :type fields: Sequence[str] + :param field_value_types: The type of the field values. The only allowed + value is "double" at the moment. Defaults to "double". + :type field_value_types: str + :param name: Optional name for the index. + :type name: str | None + :param unique: Whether the index is unique. + :type unique: bool | None + :param in_background: Do not hold the collection lock. + :type in_background: bool | None + :return: New index details. + :rtype: dict + :raise arango.exceptions.IndexCreateError: If create fails. + """ + data: Json = { + "type": "mdi", + "fields": fields, + "fieldValueTypes": field_value_types, + } + + if unique is not None: + data["unique"] = unique + if name is not None: + data["name"] = name + if in_background is not None: + data["inBackground"] = in_background + + return self._add_index(data) + def delete_index(self, index_id: str, ignore_missing: bool = False) -> Result[bool]: """Delete an index. @@ -1673,6 +1758,7 @@ def insert_many( keep_none: Optional[bool] = None, merge: Optional[bool] = None, refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: """Insert multiple documents. 
@@ -1727,6 +1813,9 @@ def insert_many( index caches if document insertions affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param version_attribute: support for simple external versioning to + document operations. + :type version_attribute: str :return: List of document metadata (e.g. document keys, revisions) and any exception, or True if parameter **silent** was set to True. :rtype: [dict | ArangoServerError] | bool @@ -1749,6 +1838,8 @@ def insert_many( params["keepNull"] = keep_none if merge is not None: params["mergeObjects"] = merge + if version_attribute is not None: + params["versionAttribute"] = version_attribute # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: @@ -1795,6 +1886,7 @@ def update_many( silent: bool = False, refill_index_caches: Optional[bool] = None, raise_on_document_error: bool = False, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: """Update multiple documents. @@ -1847,6 +1939,9 @@ def update_many( as opposed to returning the error as an object in the result list. Defaults to False. :type raise_on_document_error: bool + :param version_attribute: support for simple external versioning to + document operations. + :type version_attribute: str :return: List of document metadata (e.g. document keys, revisions) and any exceptions, or True if parameter **silent** was set to True. 
:rtype: [dict | ArangoError] | bool @@ -1863,6 +1958,8 @@ def update_many( } if sync is not None: params["waitForSync"] = sync + if version_attribute is not None: + params["versionAttribute"] = version_attribute # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: @@ -1999,6 +2096,7 @@ def replace_many( sync: Optional[bool] = None, silent: bool = False, refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: """Replace multiple documents. @@ -2040,6 +2138,9 @@ def replace_many( index caches if document operations affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param version_attribute: support for simple external versioning to + document operations. + :type version_attribute: str :return: List of document metadata (e.g. document keys, revisions) and any exceptions, or True if parameter **silent** was set to True. :rtype: [dict | ArangoServerError] | bool @@ -2054,6 +2155,8 @@ def replace_many( } if sync is not None: params["waitForSync"] = sync + if version_attribute is not None: + params["versionAttribute"] = version_attribute # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: @@ -2528,6 +2631,7 @@ def insert( keep_none: Optional[bool] = None, merge: Optional[bool] = None, refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, Json]]: """Insert a new document. @@ -2566,6 +2670,9 @@ def insert( index caches if document insertions affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param version_attribute: support for simple external versioning to + document operations. + :type version_attribute: str :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. 
:rtype: bool | dict @@ -2587,6 +2694,8 @@ def insert( params["keepNull"] = keep_none if merge is not None: params["mergeObjects"] = merge + if version_attribute is not None: + params["versionAttribute"] = version_attribute # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: @@ -2625,6 +2734,7 @@ def update( sync: Optional[bool] = None, silent: bool = False, refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, Json]]: """Update a document. @@ -2655,6 +2765,9 @@ def update( index caches if document insertions affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param version_attribute: support for simple external versioning + to document operations. + :type version_attribute: str :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. :rtype: bool | dict @@ -2673,6 +2786,9 @@ def update( if sync is not None: params["waitForSync"] = sync + if version_attribute is not None: + params["versionAttribute"] = version_attribute + # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: params["refillIndexCaches"] = refill_index_caches @@ -2708,6 +2824,7 @@ def replace( sync: Optional[bool] = None, silent: bool = False, refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, Json]]: """Replace a document. @@ -2733,6 +2850,9 @@ def replace( index caches if document insertions affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param version_attribute: support for simple external versioning to + document operations. + :type version_attribute: str :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. 
:rtype: bool | dict @@ -2749,6 +2869,9 @@ def replace( if sync is not None: params["waitForSync"] = sync + if version_attribute is not None: + params["versionAttribute"] = version_attribute + # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: params["refillIndexCaches"] = refill_index_caches diff --git a/arango/connection.py b/arango/connection.py index 3daa4585..d25d2f78 100644 --- a/arango/connection.py +++ b/arango/connection.py @@ -23,7 +23,7 @@ JWTRefreshError, ServerConnectionError, ) -from arango.http import HTTPClient +from arango.http import HTTPClient, RequestCompression from arango.request import Request from arango.resolver import HostResolver from arango.response import Response @@ -44,6 +44,8 @@ def __init__( http_client: HTTPClient, serializer: Callable[..., str], deserializer: Callable[[str], Any], + request_compression: Optional[RequestCompression] = None, + response_compression: Optional[str] = None, ) -> None: self._url_prefixes = [f"{host}/_db/{db_name}" for host in hosts] self._host_resolver = host_resolver @@ -53,6 +55,8 @@ def __init__( self._serializer = serializer self._deserializer = deserializer self._username: Optional[str] = None + self._request_compression = request_compression + self._response_compression = response_compression @property def db_name(self) -> str: @@ -133,6 +137,19 @@ def process_request( """ tries = 0 indexes_to_filter: Set[int] = set() + + data = self.normalize_data(request.data) + if ( + self._request_compression is not None + and isinstance(data, str) + and self._request_compression.needs_compression(data) + ): + request.headers["content-encoding"] = self._request_compression.encoding() + data = self._request_compression.compress(data) + + if self._response_compression is not None: + request.headers["accept-encoding"] = self._response_compression + while tries < self._host_resolver.max_tries: try: resp = self._http.send_request( @@ -140,7 +157,7 @@ def process_request( method=request.method, 
url=self._url_prefixes[host_index] + request.endpoint, params=request.params, - data=self.normalize_data(request.data), + data=data, headers=request.headers, auth=auth, ) @@ -243,6 +260,10 @@ class BasicConnection(BaseConnection): :type password: str :param http_client: User-defined HTTP client. :type http_client: arango.http.HTTPClient + :param: request_compression: The request compression algorithm. + :type request_compression: arango.http.RequestCompression | None + :param: response_compression: The response compression algorithm. + :type response_compression: str | None """ def __init__( @@ -256,6 +277,8 @@ def __init__( http_client: HTTPClient, serializer: Callable[..., str], deserializer: Callable[[str], Any], + request_compression: Optional[RequestCompression] = None, + response_compression: Optional[str] = None, ) -> None: super().__init__( hosts, @@ -265,6 +288,8 @@ def __init__( http_client, serializer, deserializer, + request_compression, + response_compression, ) self._username = username self._auth = (username, password) @@ -298,6 +323,10 @@ class JwtConnection(BaseConnection): :type password: str :param http_client: User-defined HTTP client. :type http_client: arango.http.HTTPClient + :param request_compression: The request compression algorithm. + :type request_compression: arango.http.RequestCompression | None + :param response_compression: The response compression algorithm. 
+ :type response_compression: str | None """ def __init__( @@ -312,6 +341,8 @@ def __init__( username: Optional[str] = None, password: Optional[str] = None, user_token: Optional[str] = None, + request_compression: Optional[RequestCompression] = None, + response_compression: Optional[str] = None, ) -> None: super().__init__( hosts, @@ -321,6 +352,8 @@ def __init__( http_client, serializer, deserializer, + request_compression, + response_compression, ) self._username = username self._password = password @@ -439,6 +472,10 @@ class JwtSuperuserConnection(BaseConnection): :type http_client: arango.http.HTTPClient :param superuser_token: User generated token for superuser access. :type superuser_token: str + :param request_compression: The request compression algorithm. + :type request_compression: arango.http.RequestCompression | None + :param response_compression: The response compression algorithm. + :type response_compression: str | None """ def __init__( @@ -451,6 +488,8 @@ def __init__( serializer: Callable[..., str], deserializer: Callable[[str], Any], superuser_token: str, + request_compression: Optional[RequestCompression] = None, + response_compression: Optional[str] = None, ) -> None: super().__init__( hosts, @@ -460,6 +499,8 @@ def __init__( http_client, serializer, deserializer, + request_compression, + response_compression, ) self._auth_header = f"bearer {superuser_token}" diff --git a/arango/database.py b/arango/database.py index e15d2e07..3a76baa3 100644 --- a/arango/database.py +++ b/arango/database.py @@ -8,7 +8,7 @@ from datetime import datetime from numbers import Number -from typing import Any, List, Optional, Sequence, Union +from typing import Any, Dict, List, Optional, Sequence, Union from warnings import warn from arango.api import ApiGroup @@ -27,10 +27,12 @@ CollectionCreateError, CollectionDeleteError, CollectionListError, + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + 
DatabaseSupportInfoError, GraphCreateError, GraphDeleteError, GraphListError, @@ -46,11 +48,16 @@ ServerEchoError, ServerEncryptionError, ServerEngineError, + ServerExecuteError, ServerLicenseGetError, ServerLicenseSetError, ServerLogLevelError, ServerLogLevelSetError, + ServerLogSettingError, + ServerLogSettingSetError, ServerMetricsError, + ServerModeError, + ServerModeSetError, ServerReadLogError, ServerReloadRoutingError, ServerRequiredDBVersionError, @@ -226,6 +233,36 @@ def response_handler(resp: Response) -> Json: return self._execute(request, response_handler) + def execute(self, command: str) -> Result[Any]: + """Execute raw Javascript command on the server. + + Executes the JavaScript code in the body on the server as + the body of a function with no arguments. If you have a + return statement then the return value you produce will be returned + as 'application/json'. + + NOTE: this method endpoint will only be usable if the server + was started with the option `--javascript.allow-admin-execute true`. + The default value of this option is false, which disables the execution + of user-defined code and disables this API endpoint entirely. + This is also the recommended setting for production. + + :param command: Javascript command to execute. + :type command: str + :return: Return value of **command**, if any. + :rtype: Any + :raise arango.exceptions.ServerExecuteError: If execution fails. 
+ """ + request = Request(method="post", endpoint="/_admin/execute", data=command) + + def response_handler(resp: Response) -> Any: + if not resp.is_success: + raise ServerExecuteError(resp, request) + + return resp.body + + return self._execute(request, response_handler) + def execute_transaction( self, command: str, @@ -441,6 +478,47 @@ def response_handler(resp: Response) -> Json: return self._execute(request, response_handler) + def compact( + self, + change_level: Optional[bool] = None, + compact_bottom_most_level: Optional[bool] = None, + ) -> Result[Json]: + """Compact all databases. + + NOTE: This command can cause a full rewrite of all data in all databases, + which may take very long for large databases. It should thus only be used with + care and only when additional I/O load can be tolerated for a prolonged time. + + This method can be used to reclaim disk space after substantial data deletions + have taken place, by compacting the entire database system data. + + This method requires superuser access. + + :param change_level: Whether or not compacted data should be moved to + the minimum possible level. Default value is False. + :type change_level: bool | None + :param compact_bottom_most_level: Whether or not to compact the + bottom-most level of data. Default value is False. + :type compact_bottom_most_level: bool | None + :return: Collection compact. + :rtype: dict + :raise arango.exceptions.CollectionCompactError: If retrieval fails. 
+ """ + data = {} + if change_level is not None: + data["changeLevel"] = change_level + if compact_bottom_most_level is not None: + data["compactBottomMostLevel"] = compact_bottom_most_level + + request = Request(method="put", endpoint="/_admin/compact", data=data) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + raise DatabaseCompactError(resp, request) + + return self._execute(request, response_handler) + def required_db_version(self) -> Result[str]: """Return required version of target database. @@ -512,6 +590,56 @@ def response_handler(resp: Response) -> str: return self._execute(request, response_handler) + def mode(self) -> Result[str]: + """Return the server mode (default or read-only) + + In a read-only server, all write operations will fail + with an error code of 1004 (ERROR_READ_ONLY). Creating or dropping + databases and collections will also fail with error code 11 (ERROR_FORBIDDEN). + + :return: Server mode. Possible values are "default" or "readonly". + :rtype: str + :raise arango.exceptions.ServerModeError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_admin/server/mode") + + def response_handler(resp: Response) -> str: + if resp.is_success: + return str(resp.body["mode"]) + + raise ServerModeError(resp, request) + + return self._execute(request, response_handler) + + def set_mode(self, mode: str) -> Result[Json]: + """Set the server mode to read-only or default. + + Update mode information about a server. The JSON response will + contain a field mode with the value readonly or default. + In a read-only server all write operations will fail with an error + code of 1004 (ERROR_READ_ONLY). Creating or dropping of databases + and collections will also fail with error code 11 (ERROR_FORBIDDEN). + + This is a protected API. It requires authentication and administrative + server rights. + + :param mode: Server mode. Possible values are "default" or "readonly". 
+ :type mode: str + :return: Server mode. + :rtype: str + :raise arango.exceptions.ServerModeSetError: If set fails. + """ + request = Request( + method="put", endpoint="/_admin/server/mode", data={"mode": mode} + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + raise ServerModeSetError(resp, request) + + return self._execute(request, response_handler) + def time(self) -> Result[datetime]: """Return server system time. @@ -528,14 +656,23 @@ def response_handler(resp: Response) -> datetime: return self._execute(request, response_handler) - def echo(self) -> Result[Json]: - """Return details of the last request (e.g. headers, payload). + def echo(self, body: Optional[Any] = None) -> Result[Json]: + """Return details of the last request (e.g. headers, payload), + or echo the given request body. + :param body: The body of the request. Can be of any type + and is simply forwarded. If not set, the details of the last + request are returned. + :type body: dict | list | str | int | float | None :return: Details of the last request. :rtype: dict :raise arango.exceptions.ServerEchoError: If retrieval fails. """ - request = Request(method="get", endpoint="/_admin/echo") + request = ( + Request(method="get", endpoint="/_admin/echo") + if body is None + else Request(method="post", endpoint="/_admin/echo", data=body) + ) def response_handler(resp: Response) -> Json: if not resp.is_success: @@ -751,6 +888,52 @@ def response_handler(resp: Response) -> Json: return self._execute(request, response_handler) + def log_settings(self) -> Result[Json]: + """Return the structured log settings. + + :return: Current log settings. False values are not returned. 
+ :rtype: dict + """ + request = Request(method="get", endpoint="/_admin/log/structured") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLogSettingError(resp, request) + result: Json = resp.body + return result + + return self._execute(request, response_handler) + + def set_log_settings(self, **kwargs: Dict[str, Any]) -> Result[Json]: + """Set the structured log settings. + + This method takes arbitrary keyword arguments where the keys are the + structured log parameters and the values are true or false, for either + enabling or disabling the parameters. + + .. code-block:: python + + arango.set_log_settings( + database=True, + url=True, + username=False, + ) + + :param kwargs: Structured log parameters. + :type kwargs: Dict[str, Any] + :return: New log settings. False values are not returned. + :rtype: dict + """ + request = Request(method="put", endpoint="/_admin/log/structured", data=kwargs) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLogSettingSetError(resp, request) + result: Json = resp.body + return result + + return self._execute(request, response_handler) + def log_levels(self, server_id: Optional[str] = None) -> Result[Json]: """Return current logging levels. @@ -777,7 +960,7 @@ def response_handler(resp: Response) -> Json: return self._execute(request, response_handler) def set_log_levels( - self, server_id: Optional[str] = None, **kwargs: str + self, server_id: Optional[str] = None, **kwargs: Dict[str, Any] ) -> Result[Json]: """Set the logging levels. @@ -799,6 +982,8 @@ def set_log_levels( JWT authentication whereas Coordinators also support authentication using usernames and passwords. :type server_id: str | None + :param kwargs: Logging levels. + :type kwargs: Dict[str, Any] :return: New logging levels. :rtype: dict """ @@ -840,7 +1025,7 @@ def metrics(self) -> Result[str]: :return: Server metrics in Prometheus format. 
:rtype: str """ - request = Request(method="get", endpoint="/_admin/metrics") + request = Request(method="get", endpoint="/_admin/metrics/v2") def response_handler(resp: Response) -> str: if resp.is_success: @@ -2729,6 +2914,38 @@ def response_handler(resp: Response) -> bool: return self._execute(request, response_handler) + ########### + # Support # + ########### + + def support_info(self) -> Result[Json]: + """Return information about the deployment. + + Retrieves deployment information for support purposes. + The endpoint returns data about the ArangoDB version used, + the host (operating system, server ID, CPU and storage capacity, + current utilization, a few metrics) and the other servers in the + deployment (in case of Active Failover or cluster deployments). + + NOTE: This method can only be accessed from inside the **_system** database. + There is a policy control startup option `--server.support-info-api` that controls + if and to whom the API is made available. + + :return: Deployment information. + :rtype: dict + :raise arango.exceptions.DatabaseSupportInfoError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_admin/support-info") + + def response_handler(resp: Response) -> Json: + if resp.is_success: + result: Json = resp.body + return result + + raise DatabaseSupportInfoError(resp, request) + + return self._execute(request, response_handler) + class StandardDatabase(Database): """Standard database API wrapper.""" @@ -2785,6 +3002,14 @@ def begin_batch_execution( """ return BatchDatabase(self._conn, return_result, max_workers) + def fetch_transaction(self, transaction_id: str) -> "TransactionDatabase": + """Fetch an existing transaction. + + :param transaction_id: The ID of the existing transaction. 
+ :type transaction_id: str + """ + return TransactionDatabase(connection=self._conn, transaction_id=transaction_id) + def begin_transaction( self, read: Union[str, Sequence[str], None] = None, @@ -2962,6 +3187,9 @@ class TransactionDatabase(Database): :type lock_timeout: int | None :param max_size: Max transaction size in bytes. :type max_size: int | None + :param transaction_id: Initialize using an existing transaction instead of creating + a new transaction. + :type transaction_id: str | None """ def __init__( @@ -2974,6 +3202,7 @@ def __init__( allow_implicit: Optional[bool] = None, lock_timeout: Optional[int] = None, max_size: Optional[int] = None, + transaction_id: Optional[str] = None, ) -> None: self._executor: TransactionApiExecutor super().__init__( @@ -2987,6 +3216,7 @@ def __init__( allow_implicit=allow_implicit, lock_timeout=lock_timeout, max_size=max_size, + transaction_id=transaction_id, ), ) diff --git a/arango/exceptions.py b/arango/exceptions.py index e03ec95a..8c404d0b 100644 --- a/arango/exceptions.py +++ b/arango/exceptions.py @@ -360,6 +360,14 @@ class DatabaseDeleteError(ArangoServerError): """Failed to delete database.""" +class DatabaseSupportInfoError(ArangoServerError): + """Failed to retrieve support info for deployment.""" + + +class DatabaseCompactError(ArangoServerError): + """Failed to compact databases.""" + + ####################### # Document Exceptions # ####################### @@ -666,10 +674,18 @@ class ServerLogLevelError(ArangoServerError): """Failed to retrieve server log levels.""" +class ServerLogSettingError(ArangoServerError): + """Failed to retrieve server log settings.""" + + class ServerLogLevelSetError(ArangoServerError): """Failed to set server log levels.""" +class ServerLogSettingSetError(ArangoServerError): + """Failed to set server log settings.""" + + class ServerReloadRoutingError(ArangoServerError): """Failed to reload routing details.""" @@ -683,7 +699,15 @@ class ServerMetricsError(ArangoServerError): class 
ServerRoleError(ArangoServerError): - """Failed to retrieve server role in a cluster.""" + """Failed to retrieve server role.""" + + +class ServerModeError(ArangoServerError): + """Failed to retrieve server mode.""" + + +class ServerModeSetError(ArangoServerError): + """Failed to set server mode.""" class ServerTLSError(ArangoServerError): @@ -706,6 +730,10 @@ class ServerAvailableOptionsGetError(ArangoServerError): """Failed to retrieve available server options.""" +class ServerExecuteError(ArangoServerError): + """Failed to execute raw JavaScript command.""" + + ##################### # Task Exceptions # ##################### @@ -752,6 +780,10 @@ class TransactionAbortError(ArangoServerError): """Failed to abort transaction.""" +class TransactionFetchError(ArangoServerError): + """Failed to fetch existing transaction.""" + + class TransactionListError(ArangoServerError): """Failed to retrieve transactions.""" @@ -976,7 +1008,11 @@ class ClusterServerIDError(ArangoServerError): class ClusterServerRoleError(ArangoServerError): - """Failed to retrieve server role.""" + """Failed to retrieve server role in a cluster.""" + + +class ClusterServerModeError(ArangoServerError): + """Failed to retrieve server mode in a cluster.""" class ClusterServerStatisticsError(ArangoServerError): diff --git a/arango/executor.py b/arango/executor.py index 47ac4a19..b854c671 100644 --- a/arango/executor.py +++ b/arango/executor.py @@ -19,6 +19,7 @@ OverloadControlExecutorError, TransactionAbortError, TransactionCommitError, + TransactionFetchError, TransactionInitError, TransactionStatusError, ) @@ -241,6 +242,9 @@ class TransactionApiExecutor: :type max_size: int :param allow_dirty_read: Allow reads from followers in a cluster. :type allow_dirty_read: bool | None + :param transaction_id: Initialize using an existing transaction instead of starting + a new transaction. 
+ :type transaction_id: str | None """ def __init__( @@ -254,6 +258,7 @@ def __init__( lock_timeout: Optional[int] = None, max_size: Optional[int] = None, allow_dirty_read: bool = False, + transaction_id: Optional[str] = None, ) -> None: self._conn = connection @@ -275,19 +280,29 @@ def __init__( if max_size is not None: data["maxTransactionSize"] = max_size - request = Request( - method="post", - endpoint="/_api/transaction/begin", - data=data, - headers={"x-arango-allow-dirty-read": "true"} if allow_dirty_read else None, - ) - resp = self._conn.send_request(request) + if transaction_id is None: + request = Request( + method="post", + endpoint="/_api/transaction/begin", + data=data, + headers=( + {"x-arango-allow-dirty-read": "true"} if allow_dirty_read else None + ), + ) + resp = self._conn.send_request(request) - if not resp.is_success: - raise TransactionInitError(resp, request) + if not resp.is_success: + raise TransactionInitError(resp, request) + + result = resp.body["result"] + self._id: str = result["id"] + else: + self._id = transaction_id - result: Json = resp.body["result"] - self._id: str = result["id"] + try: + self.status() + except TransactionStatusError as err: + raise TransactionFetchError(err.response, err.request) @property def context(self) -> str: diff --git a/arango/formatter.py b/arango/formatter.py index 2058b1d6..6a730b87 100644 --- a/arango/formatter.py +++ b/arango/formatter.py @@ -101,6 +101,8 @@ def format_index(body: Json) -> Json: result["writebuffer_active"] = body["writebufferActive"] if "writebufferSizeMax" in body: result["writebuffer_max_size"] = body["writebufferSizeMax"] + if "fieldValueTypes" in body: + result["field_value_types"] = body["fieldValueTypes"] return verify_format(body, result) diff --git a/arango/http.py b/arango/http.py index c5eb0acd..d0b17939 100644 --- a/arango/http.py +++ b/arango/http.py @@ -1,6 +1,13 @@ -__all__ = ["HTTPClient", "DefaultHTTPClient", "DEFAULT_REQUEST_TIMEOUT"] +__all__ = [ + "HTTPClient", 
+ "DefaultHTTPClient", + "DeflateRequestCompression", + "RequestCompression", + "DEFAULT_REQUEST_TIMEOUT", +] import typing +import zlib from abc import ABC, abstractmethod from typing import Any, MutableMapping, Optional, Tuple, Union @@ -40,7 +47,7 @@ def send_request( url: str, headers: Optional[Headers] = None, params: Optional[MutableMapping[str, str]] = None, - data: Union[str, MultipartEncoder, None] = None, + data: Union[str, bytes, MultipartEncoder, None] = None, auth: Optional[Tuple[str, str]] = None, ) -> Response: """Send an HTTP request. @@ -58,7 +65,7 @@ def send_request( :param params: URL (query) parameters. :type params: dict :param data: Request payload. - :type data: str | MultipartEncoder | None + :type data: str | bytes | MultipartEncoder | None :param auth: Username and password. :type auth: tuple :returns: HTTP response. @@ -198,7 +205,7 @@ def send_request( url: str, headers: Optional[Headers] = None, params: Optional[MutableMapping[str, str]] = None, - data: Union[str, MultipartEncoder, None] = None, + data: Union[str, bytes, MultipartEncoder, None] = None, auth: Optional[Tuple[str, str]] = None, ) -> Response: """Send an HTTP request. @@ -214,7 +221,7 @@ def send_request( :param params: URL (query) parameters. :type params: dict :param data: Request payload. - :type data: str | MultipartEncoder | None + :type data: str | bytes | MultipartEncoder | None :param auth: Username and password. :type auth: tuple :returns: HTTP response. @@ -237,3 +244,75 @@ def send_request( status_text=response.reason, raw_body=response.text, ) + + +class RequestCompression(ABC): # pragma: no cover + """Abstract base class for request compression.""" + + @abstractmethod + def needs_compression(self, data: str) -> bool: + """ + :param data: Data to be compressed. + :type data: str + :returns: True if the data needs to be compressed. 
+ :rtype: bool + """ + raise NotImplementedError + + @abstractmethod + def compress(self, data: str) -> bytes: + """Compress the data. + + :param data: Data to be compressed. + :type data: str + :returns: Compressed data. + :rtype: bytes + """ + raise NotImplementedError + + @abstractmethod + def encoding(self) -> str: + """Return the content encoding exactly as it should + appear in the headers. + + :returns: Content encoding. + :rtype: str + """ + raise NotImplementedError + + +class DeflateRequestCompression(RequestCompression): + """Compress requests using the 'deflate' algorithm.""" + + def __init__(self, threshold: int = 1024, level: int = 6): + """ + :param threshold: Will compress requests to the server if + the size of the request body (in bytes) is at least the value of this + option. + :type threshold: int + :param level: Compression level, in 0-9 or -1. + :type level: int + """ + self._threshold = threshold + self._level = level + + def needs_compression(self, data: str) -> bool: + """ + :param data: Data to be compressed. + :type data: str + :returns: True if the data needs to be compressed. + :rtype: bool + """ + return len(data) >= self._threshold + + def compress(self, data: str) -> bytes: + """ + :param data: Data to be compressed. + :type data: str + :returns: Compressed data. 
+ :rtype: bytes + """ + return zlib.compress(data.encode("utf-8"), level=self._level) + + def encoding(self) -> str: + return "deflate" diff --git a/arango/replication.py b/arango/replication.py index 0ecfc20e..d5fae457 100644 --- a/arango/replication.py +++ b/arango/replication.py @@ -180,13 +180,15 @@ def response_handler(resp: Response) -> Json: if resp.is_success: result = format_replication_header(resp.headers) result["content"] = [ - [ - self._conn.deserialize(line) - for line in resp.body.split("\n") - if line - ] - if deserialize - else resp.body + ( + [ + self._conn.deserialize(line) + for line in resp.body.split("\n") + if line + ] + if deserialize + else resp.body + ) ] return result diff --git a/docs/admin.rst b/docs/admin.rst index 744b44b3..e1c1efeb 100644 --- a/docs/admin.rst +++ b/docs/admin.rst @@ -32,9 +32,22 @@ database. # Retrieve the server time. sys_db.time() - # Retrieve the server role in a cluster. + # Retrieve the server role. sys_db.role() + # Retrieve the server role in a cluster. + sys_db.cluster.server_role() + + # Retrieve the server mode. + sys_db.mode() + + # Retrieve the server mode in a cluster. + sys_db.cluster.server_mode() + + # Set the server mode. + sys_db.set_mode('readonly') + sys_db.set_mode('default') + # Retrieve the server statistics. sys_db.statistics() @@ -54,6 +67,9 @@ database. # Echo the last request. sys_db.echo() + # Echo a request + sys_db.echo('request goes here') + # Reload the routing collection. sys_db.reload_routing() diff --git a/docs/cluster.rst b/docs/cluster.rst index fbb3bb5e..fdb45bca 100644 --- a/docs/cluster.rst +++ b/docs/cluster.rst @@ -86,8 +86,13 @@ Below is an example on how to manage clusters using python-arango. cluster.server_engine(server_id) cluster.server_version(server_id) cluster.server_statistics(server_id) + cluster.server_maintenance_mode(server_id) - # Toggle maintenance mode (allowed values are "on" and "off"). 
+ # Toggle Server maintenance mode (allowed values are "normal" and "maintenance"). + cluster.toggle_server_maintenance_mode(server_id, 'normal') + cluster.toggle_server_maintenance_mode(server_id, 'maintenance', timeout=30) + + # Toggle Cluster maintenance mode (allowed values are "on" and "off"). cluster.toggle_maintenance_mode('on') cluster.toggle_maintenance_mode('off') diff --git a/docs/compression.rst b/docs/compression.rst new file mode 100644 index 00000000..526e20f1 --- /dev/null +++ b/docs/compression.rst @@ -0,0 +1,40 @@ +Compression +------------ + +The :ref:`ArangoClient` lets you define the preferred compression policy for request and responses. By default +compression is disabled. You can change this by setting the `request_compression` and `response_compression` parameters +when creating the client. Currently, only the "deflate" compression algorithm is supported. + +.. testcode:: + + from arango import ArangoClient + + from arango.http import DeflateRequestCompression + + client = ArangoClient( + hosts='http://localhost:8529', + request_compression=DeflateRequestCompression(), + response_compression="deflate" + ) + +Furthermore, you can customize the request compression policy by defining the minimum size of the request body that +should be compressed and the desired compression level. For example, the following code sets the minimum size to 2 KB +and the compression level to 8: + +.. code-block:: python + + client = ArangoClient( + hosts='http://localhost:8529', + request_compression=DeflateRequestCompression( + threshold=2048, + level=8), + ) + +If you want to implement your own compression policy, you can do so by implementing the +:class:`arango.http.RequestCompression` interface. + +.. note:: + The `response_compression` parameter is only used to inform the server that the client prefers compressed responses + (in the form of an *Accept-Encoding* header). 
Note that the server may or may not honor this preference, depending + on how it is configured. This can be controlled by setting the `--http.compress-response-threshold` option to + a value greater than 0 when starting the ArangoDB server. diff --git a/docs/conf.py b/docs/conf.py index 77fe6bb8..24e5586a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -4,7 +4,7 @@ sys.path.insert(0, os.path.abspath("..")) project = "python-arango" -copyright = "2016-2022, Joohwan Oh" +copyright = "2016-2024, Joohwan Oh" author = "Joohwan Oh" extensions = [ "sphinx_rtd_theme", diff --git a/docs/contributing.rst b/docs/contributing.rst index 35a72657..2093f72f 100644 --- a/docs/contributing.rst +++ b/docs/contributing.rst @@ -7,16 +7,16 @@ Requirements Before submitting a pull request on GitHub_, please make sure you meet the following requirements: -* The pull request points to dev_ branch. +* The pull request points to main_ branch. * Changes are squashed into a single commit. I like to use git rebase for this. * Commit message is in present tense. For example, "Fix bug" is good while "Fixed bug" is not. * Sphinx_-compatible docstrings. * PEP8_ compliance. * No missing docstrings or commented-out lines. -* Test coverage_ remains at %100. If a piece of code is trivial and does not +* Test coverage remains at %100. If a piece of code is trivial and does not need unit tests, use this_ to exclude it from coverage. -* No build failures on `Travis CI`_. Builds automatically trigger on pull +* No build failures. Builds automatically trigger on pull request submissions. * Documentation is kept up-to-date with the new changes (see below). @@ -40,7 +40,7 @@ To ensure PEP8_ compliance, run flake8_: .. code-block:: bash ~$ pip install flake8 - ~$ git clone https://github.com/ArangoDB-Community/python-arango.git + ~$ git clone https://github.com/arangodb/python-arango.git ~$ cd python-arango ~$ flake8 @@ -57,7 +57,7 @@ To run the test suite (use your own host, port and root password): .. 
code-block:: bash ~$ pip install pytest - ~$ git clone https://github.com/ArangoDB-Community/python-arango.git + ~$ git clone https://github.com/arangodb/python-arango.git ~$ cd python-arango ~$ py.test --complete --host=127.0.0.1 --port=8529 --passwd=passwd @@ -66,7 +66,7 @@ To run the test suite with coverage report: .. code-block:: bash ~$ pip install coverage pytest pytest-cov - ~$ git clone https://github.com/ArangoDB-Community/python-arango.git + ~$ git clone https://github.com/arangodb/python-arango.git ~$ cd python-arango ~$ py.test --complete --host=127.0.0.1 --port=8529 --passwd=passwd --cov=kq @@ -82,18 +82,16 @@ Sphinx_. To build an HTML version on your local machine: .. code-block:: bash ~$ pip install sphinx sphinx_rtd_theme - ~$ git clone https://github.com/ArangoDB-Community/python-arango.git + ~$ git clone https://github.com/arangodb/python-arango.git ~$ cd python-arango ~$ python -m sphinx -b html -W docs docs/_build/ # Open build/index.html in a browser As always, thank you for your contribution! -.. _dev: https://github.com/joowani/python-arango/tree/dev -.. _GitHub: https://github.com/joowani/python-arango +.. _main: https://github.com/arangodb/python-arango/tree/main +.. _GitHub: https://github.com/arangodb/python-arango .. _PEP8: https://www.python.org/dev/peps/pep-0008/ -.. _coverage: https://coveralls.io/github/joowani/python-arango .. _this: http://coverage.readthedocs.io/en/latest/excluding.html -.. _Travis CI: https://travis-ci.org/joowani/python-arango .. _Sphinx: https://github.com/sphinx-doc/sphinx .. _flake8: http://flake8.pycqa.org .. 
_here: http://flake8.pycqa.org/en/latest/user/violations.html#in-line-ignoring-errors diff --git a/docs/index.rst b/docs/index.rst index 232103b0..09f96f51 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,7 +10,7 @@ Welcome to the documentation for **python-arango**, a Python driver for ArangoDB Requirements ============= -- ArangoDB version 3.9+ +- ArangoDB version 3.11+ - Python version 3.8+ Installation @@ -23,42 +23,75 @@ Installation Contents ======== +Basics + .. toctree:: :maxdepth: 1 overview database collection - document - schema indexes + document graph - aql simple - cursor + aql + +Specialized Features + +.. toctree:: + :maxdepth: 1 + + pregel + foxx + replication + transaction + cluster + analyzer + view + wal + +API Executions + +.. toctree:: + :maxdepth: 1 + async batch overload - transaction + +Administration + +.. toctree:: + :maxdepth: 1 + admin user + +Miscellaneous + +.. toctree:: + :maxdepth: 1 + task - wal - pregel - foxx - view - analyzer threading certificates errors logging auth http - replication - cluster + compression serializer + schema + cursor backup errno + +Development + +.. toctree:: + :maxdepth: 1 + contributing specs diff --git a/docs/pregel.rst b/docs/pregel.rst index 45c55f4a..5aad7abe 100644 --- a/docs/pregel.rst +++ b/docs/pregel.rst @@ -1,6 +1,10 @@ Pregel ------ +.. warning:: + Starting from ArangoDB 3.12, the Pregel API has been dropped. + Currently, the driver still supports it for the 3.10 and 3.11 versions, but note that it will be dropped eventually. + Python-arango provides support for **Pregel**, ArangoDB module for distributed iterative graph processing. For more information, refer to `ArangoDB manual`_. @@ -8,7 +12,7 @@ iterative graph processing. For more information, refer to `ArangoDB manual`_. **Example:** -.. testcode:: +.. 
code-block:: python from arango import ArangoClient diff --git a/docs/simple.rst b/docs/simple.rst index 4d483e65..8f28f634 100644 --- a/docs/simple.rst +++ b/docs/simple.rst @@ -1,8 +1,6 @@ Simple Queries -------------- -.. caution:: There is no option to add a TTL (Time to live) or batch size optimizations to the Simple Queries due to how Arango is handling simple collection HTTP requests. Your request may time out and you'll see a CursorNextError exception. The AQL queries provide full functionality. - Here is an example of using ArangoDB's **simply queries**: .. testcode:: diff --git a/docs/specs.rst b/docs/specs.rst index b4f61854..87e1d184 100644 --- a/docs/specs.rst +++ b/docs/specs.rst @@ -103,6 +103,12 @@ DefaultHTTPClient .. autoclass:: arango.http.DefaultHTTPClient :members: +DeflateRequestCompression +========================= + +.. autoclass:: arango.http.DeflateRequestCompression + :members: + .. _EdgeCollection: EdgeCollection diff --git a/docs/transaction.rst b/docs/transaction.rst index 18d60a68..8bdeb18d 100644 --- a/docs/transaction.rst +++ b/docs/transaction.rst @@ -75,6 +75,16 @@ logical unit of work (ACID compliant). assert 'Lily' not in col assert len(col) == 3 # transaction is aborted so txn_col cannot be used + # Fetch an existing transaction. Useful if you have received a Transaction ID + # from some other part of your system or an external system. + original_txn = db.begin_transaction(write='students') + txn_col = original_txn.collection('students') + assert '_rev' in txn_col.insert({'_key': 'Chip'}) + txn_db = db.fetch_transaction(original_txn.transaction_id) + txn_col = txn_db.collection('students') + assert '_rev' in txn_col.insert({'_key': 'Alya'}) + txn_db.abort_transaction() + See :ref:`TransactionDatabase` for API specification. 
Alternatively, you can use diff --git a/pyproject.toml b/pyproject.toml index 7c0d1244..dee5fe8b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,7 +66,7 @@ dev = [ "arango" = ["py.typed"] [project.urls] -homepage = "https://github.com/ArangoDB-Community/python-arango" +homepage = "https://github.com/arangodb/python-arango" [tool.setuptools] packages = ["arango"] diff --git a/tests/conftest.py b/tests/conftest.py index da95e2ef..184269d7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -134,7 +134,7 @@ def pytest_configure(config): global_data.username = username global_data.password = password global_data.db_name = tst_db_name - global_data.db_version = version.parse(db_version) + global_data.db_version = version.parse(db_version.split("-")[0]) global_data.sys_db = sys_db global_data.tst_db = tst_db global_data.bad_db = bad_db diff --git a/tests/static/cluster-3.10.conf b/tests/static/cluster-3.10.conf index 573c030a..d7732c90 100644 --- a/tests/static/cluster-3.10.conf +++ b/tests/static/cluster-3.10.conf @@ -10,3 +10,4 @@ jwt-secret = /tests/static/keyfile [args] all.database.password = passwd all.log.api-enabled = true +all.javascript.allow-admin-execute = true diff --git a/tests/static/cluster.conf b/tests/static/cluster.conf index 182f3d17..86f78556 100644 --- a/tests/static/cluster.conf +++ b/tests/static/cluster.conf @@ -11,3 +11,4 @@ jwt-secret = /tests/static/keyfile all.database.password = passwd all.database.extended-names = true all.log.api-enabled = true +all.javascript.allow-admin-execute = true diff --git a/tests/static/single-3.10.conf b/tests/static/single-3.10.conf index c982303b..09d1d9f3 100644 --- a/tests/static/single-3.10.conf +++ b/tests/static/single-3.10.conf @@ -8,3 +8,4 @@ jwt-secret = /tests/static/keyfile [args] all.database.password = passwd +all.javascript.allow-admin-execute = true diff --git a/tests/static/single.conf b/tests/static/single.conf index e880f9d5..df45cb76 100644 --- a/tests/static/single.conf +++ 
b/tests/static/single.conf @@ -9,3 +9,4 @@ jwt-secret = /tests/static/keyfile [args] all.database.password = passwd all.database.extended-names = true +all.javascript.allow-admin-execute = true diff --git a/tests/test_analyzer.py b/tests/test_analyzer.py index 63627251..647fa333 100644 --- a/tests/test_analyzer.py +++ b/tests/test_analyzer.py @@ -1,5 +1,3 @@ -from packaging import version - from arango.exceptions import ( AnalyzerCreateError, AnalyzerDeleteError, @@ -9,7 +7,7 @@ from tests.helpers import assert_raises, generate_analyzer_name -def test_analyzer_management(db, bad_db, cluster, enterprise, db_version): +def test_analyzer_management(db, bad_db, cluster, enterprise): analyzer_name = generate_analyzer_name() full_analyzer_name = db.name + "::" + analyzer_name bad_analyzer_name = generate_analyzer_name() @@ -60,7 +58,7 @@ def test_analyzer_management(db, bad_db, cluster, enterprise, db_version): assert db.delete_analyzer(analyzer_name, ignore_missing=True) is False # Test create geo_s2 analyzer (EE only) - if enterprise and db_version >= version.parse("3.10.5"): + if enterprise: analyzer_name = generate_analyzer_name() result = db.create_analyzer(analyzer_name, "geo_s2", {}) assert result["type"] == "geo_s2" diff --git a/tests/test_aql.py b/tests/test_aql.py index 65b7365e..885d6a4e 100644 --- a/tests/test_aql.py +++ b/tests/test_aql.py @@ -199,8 +199,7 @@ def test_aql_query_management(db_version, db, bad_db, col, docs): assert "state" in query assert "bind_vars" in query assert "runtime" in query - if db_version >= version.parse("3.11"): - assert "peak_memory_usage" in query + assert "peak_memory_usage" in query assert len(queries) == 2 # Test list queries with bad database @@ -247,7 +246,7 @@ def test_aql_query_management(db_version, db, bad_db, col, docs): def test_aql_query_force_one_shard_attribute_value(db, db_version, enterprise, cluster): - if db_version < version.parse("3.10") or not enterprise or not cluster: + if not enterprise or not cluster: 
return name = generate_col_name() diff --git a/tests/test_client.py b/tests/test_client.py index 5faa84db..e43180f7 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -9,9 +9,14 @@ from arango.client import ArangoClient from arango.database import StandardDatabase from arango.exceptions import ServerConnectionError -from arango.http import DefaultHTTPClient +from arango.http import DefaultHTTPClient, DeflateRequestCompression from arango.resolver import FallbackHostResolver, RandomHostResolver, SingleHostResolver -from tests.helpers import generate_db_name, generate_string, generate_username +from tests.helpers import ( + generate_col_name, + generate_db_name, + generate_string, + generate_username, +) def test_client_attributes(): @@ -184,3 +189,56 @@ def test_can_serialize_deserialize_client() -> None: client_pstr = pickle.dumps(client) client2 = pickle.loads(client_pstr) assert len(client2._sessions) > 0 + + +def test_client_compression(db, username, password): + class CheckCompression: + def __init__(self, should_compress: bool): + self.should_compress = should_compress + + def check(self, headers): + if self.should_compress: + assert headers["content-encoding"] == "deflate" + else: + assert "content-encoding" not in headers + + class MyHTTPClient(DefaultHTTPClient): + def __init__(self, compression_checker: CheckCompression) -> None: + super().__init__() + self.checker = compression_checker + + def send_request( + self, session, method, url, headers=None, params=None, data=None, auth=None + ): + self.checker.check(headers) + return super().send_request( + session, method, url, headers, params, data, auth + ) + + checker = CheckCompression(should_compress=False) + + # should not compress, as threshold is 0 + client = ArangoClient( + hosts="http://127.0.0.1:8529", + http_client=MyHTTPClient(compression_checker=checker), + response_compression="gzip", + ) + db = client.db(db.name, username, password) + col = db.create_collection(generate_col_name()) 
+ col.insert({"_key": "1"}) + + # should not compress, as size of payload is less than threshold + checker = CheckCompression(should_compress=False) + client = ArangoClient( + hosts="http://127.0.0.1:8529", + http_client=MyHTTPClient(compression_checker=checker), + request_compression=DeflateRequestCompression(250, level=7), + response_compression="deflate", + ) + db = client.db(db.name, username, password) + col = db.create_collection(generate_col_name()) + col.insert({"_key": "2"}) + + # should compress + checker.should_compress = True + col.insert({"_key": "3" * 250}) diff --git a/tests/test_cluster.py b/tests/test_cluster.py index bbc31778..d5ab4365 100644 --- a/tests/test_cluster.py +++ b/tests/test_cluster.py @@ -1,7 +1,7 @@ +import time import warnings import pytest -from packaging import version from arango.errno import DATABASE_NOT_FOUND, FORBIDDEN from arango.exceptions import ( @@ -12,6 +12,7 @@ ClusterServerCountError, ClusterServerEngineError, ClusterServerIDError, + ClusterServerModeError, ClusterServerRoleError, ClusterServerStatisticsError, ClusterServerVersionError, @@ -43,6 +44,18 @@ def test_cluster_server_role(sys_db, bad_db, cluster): assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND} +def test_cluster_server_mode(sys_db, bad_db, cluster): + if not cluster: + pytest.skip("Only tested in a cluster setup") + + result = sys_db.cluster.server_mode() + assert result == "default" + + with assert_raises(ClusterServerModeError) as err: + bad_db.cluster.server_mode() + assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND} + + def test_cluster_health(sys_db, bad_db, cluster): if not cluster: pytest.skip("Only tested in a cluster setup") @@ -99,6 +112,37 @@ def test_cluster_server_statistics(sys_db, bad_db, cluster): assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND} +def test_cluster_server_maintenance_mode(sys_db, bad_db, cluster): + if not cluster: + pytest.skip("Only tested in a cluster setup") + + # Must be a DBServer 
+ health = sys_db.cluster.health() + server_id = None + for server_id, info in health["Health"].items(): + if info["Role"] == "DBServer": + server_id = server_id + break + if server_id is None: + pytest.skip("No DBServer found in cluster") + + result = sys_db.cluster.server_maintenance_mode(server_id) + assert result == {} + + with assert_raises(ClusterMaintenanceModeError) as err: + bad_db.cluster.server_maintenance_mode(server_id) + assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND} + + sys_db.cluster.toggle_server_maintenance_mode(server_id, "maintenance", timeout=2) + result = sys_db.cluster.server_maintenance_mode(server_id) + assert "Mode" in result + assert "Until" in result + + time.sleep(5) + result = sys_db.cluster.server_maintenance_mode(server_id) + assert result == {} + + def test_cluster_toggle_maintenance_mode(sys_db, bad_db, cluster): if not cluster: pytest.skip("Only tested in a cluster setup") @@ -140,13 +184,10 @@ def test_cluster_server_count(db, bad_db, cluster): assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND} -def test_cluster_rebalance(sys_db, bad_db, cluster, db_version): +def test_cluster_rebalance(sys_db, bad_db, cluster): if not cluster: pytest.skip("Only tested in a cluster setup") - if db_version < version.parse("3.10.0"): - pytest.skip("Only tested on ArangoDB 3.10+") - # Test imbalance retrieval imbalance = sys_db.cluster.calculate_imbalance() assert "leader" in imbalance diff --git a/tests/test_collection.py b/tests/test_collection.py index a2d07587..65860c36 100644 --- a/tests/test_collection.py +++ b/tests/test_collection.py @@ -1,5 +1,4 @@ import pytest -from packaging import version from arango.client import ArangoClient from arango.collection import StandardCollection @@ -316,21 +315,15 @@ def special_db_names(sys_db): pass -def test_collection_utf8(db, db_version, special_collection_names): - if db_version < version.parse("3.11.0"): - pytest.skip("UTF8 collection names require ArangoDB 3.11+") - +def 
test_collection_utf8(db, special_collection_names): for name in special_collection_names: create_and_delete_collection(db, name) # Not sure if this belongs in here or in `test_database.py`... def test_database_and_collection_utf8( - sys_db, db_version, special_collection_names, special_db_names + sys_db, special_collection_names, special_db_names ): - if db_version < version.parse("3.11.0"): - pytest.skip("UTF8 collection names require ArangoDB 3.11+") - client = ArangoClient(hosts="http://127.0.0.1:8529") for db_name in special_db_names: username = generate_username() diff --git a/tests/test_cursor.py b/tests/test_cursor.py index e03eae32..e6fe4713 100644 --- a/tests/test_cursor.py +++ b/tests/test_cursor.py @@ -1,5 +1,4 @@ import pytest -from packaging import version from arango.exceptions import ( CursorCloseError, @@ -263,7 +262,7 @@ def test_cursor_manual_fetch_and_pop(db, col, docs): assert err.value.message == "current batch is empty" -def test_cursor_retry_disabled(db, col, docs, db_version): +def test_cursor_retry_disabled(db, col, docs): cursor = db.aql.execute( f"FOR d IN {col.name} SORT d._key RETURN d", count=True, @@ -282,8 +281,7 @@ def test_cursor_retry_disabled(db, col, docs, db_version): # The next batch ID should have no effect cursor._next_batch_id = "2" result = cursor.fetch() - if db_version >= version.parse("3.11.1"): - assert result["next_batch_id"] == "4" + assert result["next_batch_id"] == "4" doc = cursor.pop() assert clean_doc(doc) == docs[2] @@ -308,28 +306,25 @@ def test_cursor_retry(db, col, docs, db_version): result = cursor.fetch() assert result["id"] == cursor.id - if db_version >= version.parse("3.11.0"): - assert result["next_batch_id"] == "3" + assert result["next_batch_id"] == "3" doc = cursor.pop() assert clean_doc(doc) == docs[1] assert cursor.empty() # Decrease the next batch ID as if the previous fetch failed - if db_version >= version.parse("3.11.0"): - cursor._next_batch_id = "2" - result = cursor.fetch() - assert 
result["id"] == cursor.id - assert result["next_batch_id"] == "3" - doc = cursor.pop() - assert clean_doc(doc) == docs[1] - assert cursor.empty() + cursor._next_batch_id = "2" + result = cursor.fetch() + assert result["id"] == cursor.id + assert result["next_batch_id"] == "3" + doc = cursor.pop() + assert clean_doc(doc) == docs[1] + assert cursor.empty() # Fetch the next batches normally for batch in range(2, 5): result = cursor.fetch() assert result["id"] == cursor.id - if db_version >= version.parse("3.11.0"): - assert result["next_batch_id"] == str(batch + 2) + assert result["next_batch_id"] == str(batch + 2) doc = cursor.pop() assert clean_doc(doc) == docs[batch] @@ -340,17 +335,12 @@ def test_cursor_retry(db, col, docs, db_version): doc = cursor.pop() assert clean_doc(doc) == docs[-1] - if db_version >= version.parse("3.11.0"): - # We should be able to fetch the last batch again - cursor.fetch() - doc = cursor.pop() - assert clean_doc(doc) == docs[-1] + # We should be able to fetch the last batch again + cursor.fetch() + doc = cursor.pop() + assert clean_doc(doc) == docs[-1] - if db_version >= version.parse("3.11.0"): - assert cursor.close() - else: - with pytest.raises(CursorCloseError): - cursor.close() + assert cursor.close() def test_cursor_no_count(db, col): diff --git a/tests/test_database.py b/tests/test_database.py index 94db3b68..a278234b 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -1,7 +1,6 @@ from datetime import datetime import pytest -from packaging import version from arango.aql import AQL from arango.backup import Backup @@ -13,10 +12,12 @@ USE_SYSTEM_DATABASE, ) from arango.exceptions import ( + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, ServerDetailsError, ServerEchoError, ServerEngineError, @@ -24,6 +25,7 @@ ServerLogLevelError, ServerLogLevelSetError, ServerMetricsError, + ServerModeSetError, ServerReadLogError, 
ServerReloadRoutingError, ServerRequiredDBVersionError, @@ -37,7 +39,12 @@ from arango.pregel import Pregel from arango.replication import Replication from arango.wal import WAL -from tests.helpers import assert_raises, generate_db_name +from tests.helpers import ( + assert_raises, + generate_col_name, + generate_db_name, + generate_jwt, +) def test_database_attributes(db, username): @@ -57,7 +64,7 @@ def test_database_attributes(db, username): assert isinstance(db.wal, WAL) -def test_database_misc_methods(sys_db, db, bad_db, cluster): +def test_database_misc_methods(client, sys_db, db, bad_db, cluster, secret): # Test get properties properties = db.properties() assert "id" in properties @@ -132,6 +139,19 @@ def test_database_misc_methods(sys_db, db, bad_db, cluster): bad_db.role() assert err.value.error_code in {11, 1228} + # Test get/set server mode + assert sys_db.mode() == "default" + with assert_raises(ServerModeSetError) as err: + sys_db.set_mode("badmode") + assert err.value.error_code in {11, 1228} + + with assert_raises(ServerModeSetError) as err: + db.set_mode("readonly") + assert err.value.error_code in {11, 1228} + + result = sys_db.set_mode("default") + assert result == {"mode": "default"} + # Test get server status status = db.status() assert "host" in status @@ -166,6 +186,12 @@ def test_database_misc_methods(sys_db, db, bad_db, cluster): bad_db.echo() assert err.value.error_code in {11, 1228} + # Test echo (forward request) + body = "request goes here" + echo = db.echo(body) + assert isinstance(echo, dict) + assert echo["requestBody"] == body + # Test read_log with default parameters # Deprecated in 3.8.0 # TODO: Remove in future release @@ -253,6 +279,22 @@ def test_database_misc_methods(sys_db, db, bad_db, cluster): with assert_raises(ServerLogLevelSetError): bad_db.set_log_levels(**new_levels) + # Test Log Settings + result_1 = sys_db.set_log_settings(database=True, url=True, username=True) + result_2 = sys_db.log_settings() + assert isinstance(result_1, dict) 
+ assert "database" in result_1 + assert "url" in result_1 + assert "username" in result_1 + assert result_1 == result_2 + + result_1 = sys_db.set_log_settings(database=True, username=False) + result_2 = sys_db.log_settings() + assert "database" in result_1 + assert "url" in result_1 + assert "username" not in result_1 + assert result_1 == result_2 + # Test get storage engine engine = db.engine() assert engine["name"] in ["rocksdb"] @@ -263,6 +305,32 @@ def test_database_misc_methods(sys_db, db, bad_db, cluster): bad_db.engine() assert err.value.error_code in {11, 1228} + with assert_raises(DatabaseSupportInfoError) as err: + db.support_info() + + info = sys_db.support_info() + assert isinstance(info, dict) + assert "deployment" in info + assert "date" in info + + # Test execute JavaScript code + assert db.execute(1) is None + assert db.execute(None) == {"error": False, "code": 200} + assert db.execute("") == {"error": False, "code": 200} + assert db.execute("return 1") == 1 + + # Test database compact + with assert_raises(DatabaseCompactError) as err: + db.compact() + + collection = db.create_collection(generate_col_name()) + collection.insert({"foo": "bar"}) + + token = generate_jwt(secret) + db_superuser = client.db(db.name, superuser_token=token) + result = db_superuser.compact() + assert result == {} + def test_database_management(db, sys_db, bad_db): # Test list databases @@ -347,10 +415,7 @@ def special_db_names(sys_db): pass -def test_database_utf8(sys_db, db_version, special_db_names): - if db_version < version.parse("3.11.0"): - pytest.skip("UTF8 collection names require ArangoDB 3.11+") - +def test_database_utf8(sys_db, special_db_names): for name in special_db_names: assert sys_db.create_database(name) assert sys_db.has_database(name) diff --git a/tests/test_document.py b/tests/test_document.py index a4127a27..bd471e42 100644 --- a/tests/test_document.py +++ b/tests/test_document.py @@ -1,4 +1,5 @@ import pytest +from packaging import version from 
arango.exceptions import ( DocumentCountError, @@ -2067,3 +2068,74 @@ def test_document_management_via_db(db, col): assert result["_id"] == doc1_id assert doc1_id not in col assert len(col) == 2 + + +def test_version_attributes_update_many(col, db_version): + if db_version < version.parse("3.12.0"): + pytest.skip("Version attributes is tested in 3.12.0+") + + col.insert_many( + [ + {"_key": "test1", "version": 0}, + {"_key": "test2", "version": 0}, + {"_key": "test3", "version": 0}, + ] + ) + + docs = [ + {"_key": "test1", "version": 2}, + {"_key": "test1", "version": 3}, + {"_key": "test1", "version": 1}, + {"_key": "test2", "version": 1}, + {"_key": "test2", "version": 9}, + {"_key": "test2", "version": 42}, + {"_key": "test2", "version": 0}, + {"_key": "test3"}, + {"_key": "test3", "version": 5}, + {"_key": "test3", "version": 4}, + {"_key": "test3", "value": 2}, + ] + + col.update_many(docs, version_attribute="version") + assert col["test1"]["version"] == 3 + assert col["test2"]["version"] == 42 + assert col["test3"]["version"] == 5 + + docs = [ + {"_key": "test1", "version": 2}, + {"_key": "test1", "version": 3}, + {"_key": "test1", "version": 5}, + {"_key": "test2", "version": 1}, + {"_key": "test2", "version": 9}, + {"_key": "test2", "version": 42}, + {"_key": "test2", "version": 0}, + {"_key": "test3", "version": 5}, + {"_key": "test3", "version": 6}, + ] + + col.replace_many(docs, version_attribute="version") + assert col["test1"]["version"] == 5 + assert col["test2"]["version"] == 42 + assert col["test3"]["version"] == 6 + + docs = [ + {"_key": "test1", "version": 0}, + {"_key": "test2", "version": 0}, + {"_key": "test3", "version": 0}, + ] + + col.insert_many(docs, overwrite_mode="update", version_attribute="version") + assert col["test1"]["version"] == 5 + assert col["test2"]["version"] == 42 + assert col["test3"]["version"] == 6 + + docs = [ + {"_key": "test1", "version": 43}, + {"_key": "test2", "version": 41}, + {"_key": "test3", "version": 43}, + ] 
+ + col.insert_many(docs, overwrite_mode="replace", version_attribute="version") + assert col["test1"]["version"] == 43 + assert col["test2"]["version"] == 42 + assert col["test3"]["version"] == 43 diff --git a/tests/test_index.py b/tests/test_index.py index dbf235fa..41ffb301 100644 --- a/tests/test_index.py +++ b/tests/test_index.py @@ -107,7 +107,7 @@ def test_add_skiplist_index(icol): def test_add_geo_index(icol): # Test add geo index with one attribute result = icol.add_geo_index( - fields=["attr1"], ordered=False, name="geo_index", in_background=True + fields=["attr1"], geo_json=True, name="geo_index", in_background=True ) expected_index = { @@ -115,7 +115,7 @@ def test_add_geo_index(icol): "type": "geo", "fields": ["attr1"], "unique": False, - "geo_json": False, + "geo_json": True, "name": "geo_index", } for key, value in expected_index.items(): @@ -126,7 +126,7 @@ def test_add_geo_index(icol): # Test add geo index with two attributes result = icol.add_geo_index( fields=["attr1", "attr2"], - ordered=False, + geo_json=False, ) expected_index = { "sparse": True, @@ -220,10 +220,7 @@ def test_add_ttl_index(icol): icol.delete_index(result["id"]) -def test_add_inverted_index(icol, enterprise, db_version): - if db_version < version.parse("3.10.0"): - pytest.skip("Inverted indexes are not supported before 3.10.0") - +def test_add_inverted_index(icol, enterprise): parameters = dict( fields=[{"name": "attr1", "cache": True}], name="c0_cached", @@ -234,7 +231,7 @@ def test_add_inverted_index(icol, enterprise, db_version): ) expected_keys = ["primary_sort", "analyzer", "include_all_fields", "search_field"] - if enterprise and db_version >= version.parse("3.10.2"): + if enterprise: parameters["cache"] = True parameters["primaryKeyCache"] = True expected_keys.extend(["cache", "primaryKeyCache"]) @@ -248,6 +245,67 @@ def test_add_inverted_index(icol, enterprise, db_version): icol.delete_index(result["id"]) +def test_add_zkd_index(icol, db_version): + result = 
icol.add_zkd_index( + name="zkd_index", + fields=["x", "y", "z"], + field_value_types="double", + in_background=False, + unique=False, + ) + + expected_index = { + "name": "zkd_index", + "type": "zkd", + "fields": ["x", "y", "z"], + "new": True, + "unique": False, + } + + for key, value in expected_index.items(): + assert result[key] == value + + assert result["id"] in extract("id", icol.indexes()) + + with assert_raises(IndexCreateError) as err: + icol.add_zkd_index(field_value_types="integer", fields=["x", "y", "z"]) + assert err.value.error_code == 10 + + icol.delete_index(result["id"]) + + +def test_add_mdi_index(icol, db_version): + if db_version < version.parse("3.12.0"): + pytest.skip("MDI indexes are usable with 3.12+ only") + + result = icol.add_mdi_index( + name="mdi_index", + fields=["x", "y", "z"], + field_value_types="double", + in_background=False, + unique=True, + ) + + expected_index = { + "name": "mdi_index", + "type": "mdi", + "fields": ["x", "y", "z"], + "new": True, + "unique": True, + } + + for key, value in expected_index.items(): + assert result[key] == value + + assert result["id"] in extract("id", icol.indexes()) + + with assert_raises(IndexCreateError) as err: + icol.add_mdi_index(field_value_types="integer", fields=["x", "y", "z"]) + assert err.value.error_code == 10 + + icol.delete_index(result["id"]) + + def test_delete_index(icol, bad_col): old_indexes = set(extract("id", icol.indexes())) icol.add_hash_index(["attr3", "attr4"], unique=True) diff --git a/tests/test_pregel.py b/tests/test_pregel.py index e17da72b..2be8d5f0 100644 --- a/tests/test_pregel.py +++ b/tests/test_pregel.py @@ -3,15 +3,14 @@ import pytest from packaging import version -from arango.exceptions import ( - PregelJobCreateError, - PregelJobDeleteError, - PregelJobGetError, -) +from arango.exceptions import PregelJobCreateError, PregelJobDeleteError from tests.helpers import assert_raises, generate_string -def test_pregel_attributes(db, username): +def 
test_pregel_attributes(db, db_version, username): + if db_version >= version.parse("3.12.0"): + pytest.skip("Pregel is not tested in 3.12.0+") + assert db.pregel.context in ["default", "async", "batch", "transaction"] assert db.pregel.username == username assert db.pregel.db_name == db.name @@ -19,6 +18,9 @@ def test_pregel_attributes(db, username): def test_pregel_management(db, db_version, graph, cluster): + if db_version >= version.parse("3.12.0"): + pytest.skip("Pregel is not tested in 3.12.0+") + if cluster: pytest.skip("Not tested in a cluster setup") @@ -52,13 +54,8 @@ def test_pregel_management(db, db_version, graph, cluster): # Test delete existing pregel job assert db.pregel.delete_job(job_id) is True time.sleep(0.2) - if db_version < version.parse("3.11.0"): - with assert_raises(PregelJobGetError) as err: - db.pregel.job(job_id) - assert err.value.error_code in {4, 10, 1600} - else: - job = db.pregel.job(job_id) - assert job["state"] == "canceled" + job = db.pregel.job(job_id) + assert job["state"] == "canceled" # Test delete missing pregel job with assert_raises(PregelJobDeleteError) as err: diff --git a/tests/test_transaction.py b/tests/test_transaction.py index 59e86b7c..7edc2a9c 100644 --- a/tests/test_transaction.py +++ b/tests/test_transaction.py @@ -5,6 +5,7 @@ TransactionAbortError, TransactionCommitError, TransactionExecuteError, + TransactionFetchError, TransactionInitError, TransactionStatusError, ) @@ -117,6 +118,38 @@ def test_transaction_commit(db, col, docs): assert err.value.error_code in {10, 1655} +def test_transaction_fetch_existing(db, col, docs): + original_txn = db.begin_transaction( + read=col.name, + write=col.name, + exclusive=[], + sync=True, + allow_implicit=False, + lock_timeout=1000, + max_size=10000, + ) + txn_col = original_txn.collection(col.name) + + assert "_rev" in txn_col.insert(docs[0]) + assert "_rev" in txn_col.delete(docs[0]) + + txn_db = db.fetch_transaction(transaction_id=original_txn.transaction_id) + + txn_col 
= txn_db.collection(col.name) + assert "_rev" in txn_col.insert(docs[1]) + assert "_rev" in txn_col.delete(docs[1]) + + txn_db.commit_transaction() + assert txn_db.transaction_status() == "committed" + assert original_txn.transaction_status() == "committed" + assert txn_db.transaction_id == original_txn.transaction_id + + # Test fetch transaction that does not exist + with pytest.raises(TransactionFetchError) as err: + db.fetch_transaction(transaction_id="illegal") + assert err.value.error_code in {10, 1655} + + def test_transaction_abort(db, col, docs): txn_db = db.begin_transaction(write=col.name) txn_col = txn_db.collection(col.name) diff --git a/tests/test_view.py b/tests/test_view.py index fd8a5640..418892b5 100644 --- a/tests/test_view.py +++ b/tests/test_view.py @@ -1,5 +1,3 @@ -from packaging import version - from arango.exceptions import ( ViewCreateError, ViewDeleteError, @@ -180,7 +178,7 @@ def test_arangosearch_view_management(db, bad_db, cluster): assert db.delete_view(view_name, ignore_missing=False) is True -def test_arangosearch_view_properties(db, col, enterprise, db_version): +def test_arangosearch_view_properties(db, col, enterprise): view_name = generate_view_name() params = {"consolidationIntervalMsec": 50000} @@ -199,10 +197,8 @@ def test_arangosearch_view_properties(db, col, enterprise, db_version): } ) - if db_version >= version.parse("3.9.6"): - params.update({"primarySortCache": True, "primaryKeyCache": True}) - if db_version >= version.parse("3.10.3"): - params.update({"storedValues": ["attr1", "attr2"]}) + params.update({"primarySortCache": True, "primaryKeyCache": True}) + params.update({"storedValues": ["attr1", "attr2"]}) result = db.create_arangosearch_view(view_name, properties=params) assert "id" in result