From bacfc745ea56dd224d498e7843445b607f0772c8 Mon Sep 17 00:00:00 2001 From: Miguel Grinberg Date: Fri, 13 Sep 2024 14:01:26 +0100 Subject: [PATCH] Autogenerate Query classes (#1890) * Autogenerate Query classes * handle Python reserved keywords such as from * replace long deprecated 'filtered' query from tests * minor code generation updates * more code generation updates * address typing issues * clean up code generation templates * more code generator cleanup * add a unit test using some of the generated classes * no need to "reset" the interface list * use the transport's DEFAULT type * include inherited properties in docstrings and constructors * support legacy FieldValueFactor name for FieldValueFactorScore * Update utils/generator.py Co-authored-by: Quentin Pradet * use the identity operator to check for defaults * rename interfaces.py to types.py * leave undocumented classes and attributes with an empty docstring * add unit test for AttrDict with from reserved keyword * add a dependency on the transport library * Update utils/generator.py Co-authored-by: Quentin Pradet * list required arguments first in types.py * add server defaults to argument docstrings * remove unnecessary quotes from type hints in types.py * Update utils/templates/types.py.tpl Co-authored-by: Quentin Pradet * Update utils/generator.py Co-authored-by: Quentin Pradet * Update utils/generator.py Co-authored-by: Quentin Pradet * final round of review improvements --------- Co-authored-by: Quentin Pradet --- elasticsearch_dsl/faceted_search_base.py | 32 +- elasticsearch_dsl/function.py | 47 +- elasticsearch_dsl/query.py | 2539 +++++++++++++++-- elasticsearch_dsl/types.py | 3290 ++++++++++++++++++++++ elasticsearch_dsl/utils.py | 14 +- noxfile.py | 3 +- setup.py | 2 + tests/_async/test_search.py | 110 +- tests/_async/test_update_by_query.py | 32 +- tests/_sync/test_search.py | 102 +- tests/_sync/test_update_by_query.py | 32 +- tests/test_query.py | 2 +- tests/test_utils.py | 9 + utils/generator.py | 545 ++++ utils/templates/query.py.tpl | 374 +++ utils/templates/types.py.tpl | 84 + 16 files changed, 6964 insertions(+), 253 deletions(-) create mode 100644 elasticsearch_dsl/types.py create mode 100644 utils/generator.py create mode 100644 utils/templates/query.py.tpl create mode 100644 utils/templates/types.py.tpl diff --git a/elasticsearch_dsl/faceted_search_base.py b/elasticsearch_dsl/faceted_search_base.py index b959b05a..45e00a00 100644 --- a/elasticsearch_dsl/faceted_search_base.py +++ b/elasticsearch_dsl/faceted_search_base.py @@ -141,9 +141,7 @@ class TermsFacet(Facet[_R]): def add_filter(self, filter_values: List[FilterValueType]) -> Optional[Query]: """Create a terms filter instead of bool containing term filters.""" if filter_values: - return Terms( - _expand__to_dot=False, **{self._params["field"]: filter_values} - ) + return Terms(self._params["field"], filter_values, _expand__to_dot=False) return None @@ -173,13 +171,13 @@ def __init__( def get_value_filter(self, filter_value: FilterValueType) -> Query: f, t = self._ranges[filter_value] - limits = {} + limits: Dict[str, Any] = {} if f is not None: limits["gte"] = f if t is not None: limits["lt"] = t - return Range(_expand__to_dot=False, **{self._params["field"]: limits}) + return Range(self._params["field"], limits, _expand__to_dot=False) class HistogramFacet(Facet[_R]): @@ -187,13 +185,12 @@ class HistogramFacet(Facet[_R]): def get_value_filter(self, filter_value: FilterValueType) -> Range: return Range( - _expand__to_dot=False, - **{ - 
self._params["field"]: { - "gte": filter_value, - "lt": filter_value + self._params["interval"], - } + self._params["field"], + { + "gte": filter_value, + "lt": filter_value + self._params["interval"], }, + _expand__to_dot=False, ) @@ -258,15 +255,12 @@ def get_value_filter(self, filter_value: Any) -> Range: interval_type = "interval" return Range( - _expand__to_dot=False, - **{ - self._params["field"]: { - "gte": filter_value, - "lt": self.DATE_INTERVALS[self._params[interval_type]]( - filter_value - ), - } + self._params["field"], + { + "gte": filter_value, + "lt": self.DATE_INTERVALS[self._params[interval_type]](filter_value), }, + _expand__to_dot=False, ) diff --git a/elasticsearch_dsl/function.py b/elasticsearch_dsl/function.py index 5da92f70..9744e6f8 100644 --- a/elasticsearch_dsl/function.py +++ b/elasticsearch_dsl/function.py @@ -17,9 +17,20 @@ import collections.abc from copy import deepcopy -from typing import Any, ClassVar, Dict, MutableMapping, Optional, Union, overload +from typing import ( + Any, + ClassVar, + Dict, + Literal, + MutableMapping, + Optional, + Union, + overload, +) -from .utils import DslBase +from elastic_transport.client_utils import DEFAULT, DefaultType + +from .utils import AttrDict, DslBase @overload @@ -123,10 +134,14 @@ class RandomScore(ScoreFunction): name = "random_score" -class FieldValueFactor(ScoreFunction): +class FieldValueFactorScore(ScoreFunction): name = "field_value_factor" +class FieldValueFactor(FieldValueFactorScore): # alias of the above + pass + + class Linear(ScoreFunction): name = "linear" @@ -137,3 +152,29 @@ class Gauss(ScoreFunction): class Exp(ScoreFunction): name = "exp" + + +class DecayFunction(AttrDict[Any]): + def __init__( + self, + *, + decay: Union[float, "DefaultType"] = DEFAULT, + offset: Any = DEFAULT, + scale: Any = DEFAULT, + origin: Any = DEFAULT, + multi_value_mode: Union[ + Literal["min", "max", "avg", "sum"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if decay != DEFAULT: + kwargs["decay"] = decay + if offset != DEFAULT: + kwargs["offset"] = offset + if scale != DEFAULT: + kwargs["scale"] = scale + if origin != DEFAULT: + kwargs["origin"] = origin + if multi_value_mode != DEFAULT: + kwargs["multi_value_mode"] = multi_value_mode + super().__init__(kwargs) diff --git a/elasticsearch_dsl/query.py b/elasticsearch_dsl/query.py index 652b3d57..607df3ff 100644 --- a/elasticsearch_dsl/query.py +++ b/elasticsearch_dsl/query.py @@ -19,26 +19,39 @@ from copy import deepcopy from itertools import chain from typing import ( + TYPE_CHECKING, Any, Callable, ClassVar, + Dict, List, + Literal, Mapping, MutableMapping, Optional, Protocol, + Sequence, TypeVar, Union, cast, overload, ) +from elastic_transport.client_utils import DEFAULT + # 'SF' looks unused but the test suite assumes it's available # from this module so others are liable to do so as well. 
from .function import SF # noqa: F401 from .function import ScoreFunction from .utils import DslBase +if TYPE_CHECKING: + from elastic_transport.client_utils import DefaultType + + from elasticsearch_dsl import types, wrappers + + from .document_base import InstrumentedField + _T = TypeVar("_T") _M = TypeVar("_M", bound=Mapping[str, Any]) @@ -135,52 +148,60 @@ def __and__(self, other: "Query") -> "Query": return Bool(must=[self, other]) -class MatchAll(Query): - name = "match_all" - - def __add__(self, other: "Query") -> "Query": - return other._clone() - - __and__ = __rand__ = __radd__ = __add__ - - def __or__(self, other: "Query") -> "MatchAll": - return self - - __ror__ = __or__ - - def __invert__(self) -> "MatchNone": - return MatchNone() - - -EMPTY_QUERY = MatchAll() - - -class MatchNone(Query): - name = "match_none" - - def __add__(self, other: "Query") -> "MatchNone": - return self - - __and__ = __rand__ = __radd__ = __add__ - - def __or__(self, other: "Query") -> "Query": - return other._clone() - - __ror__ = __or__ - - def __invert__(self) -> MatchAll: - return MatchAll() - - class Bool(Query): + """ + matches documents matching boolean combinations of other queries. + + :arg filter: The clause (query) must appear in matching documents. + However, unlike `must`, the score of the query will be ignored. + :arg minimum_should_match: Specifies the number or percentage of + `should` clauses returned documents must match. + :arg must: The clause (query) must appear in matching documents and + will contribute to the score. + :arg must_not: The clause (query) must not appear in the matching + documents. Because scoring is ignored, a score of `0` is returned + for all documents. + :arg should: The clause (query) should appear in the matching + document. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + name = "bool" _param_defs = { + "filter": {"type": "query", "multi": True}, "must": {"type": "query", "multi": True}, - "should": {"type": "query", "multi": True}, "must_not": {"type": "query", "multi": True}, - "filter": {"type": "query", "multi": True}, + "should": {"type": "query", "multi": True}, } + def __init__( + self, + *, + filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + must: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, + must_not: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, + should: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + filter=filter, + minimum_should_match=minimum_should_match, + must=must, + must_not=must_not, + should=should, + boost=boost, + _name=_name, + **kwargs, + ) + def __add__(self, other: Query) -> "Bool": q = self._clone() if isinstance(other, Bool): @@ -290,7 +311,305 @@ def __and__(self, other: Query) -> Query: __rand__ = __and__ +class Boosting(Query): + """ + Returns documents matching a `positive` query while reducing the + relevance score of documents that also match a `negative` query. 
+ + :arg negative_boost: (required) Floating point number between 0 and + 1.0 used to decrease the relevance scores of documents matching + the `negative` query. + :arg negative: (required) Query used to decrease the relevance score + of matching documents. + :arg positive: (required) Any returned documents must match this + query. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "boosting" + _param_defs = { + "negative": {"type": "query"}, + "positive": {"type": "query"}, + } + + def __init__( + self, + *, + negative_boost: Union[float, "DefaultType"] = DEFAULT, + negative: Union[Query, "DefaultType"] = DEFAULT, + positive: Union[Query, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + negative_boost=negative_boost, + negative=negative, + positive=positive, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Common(Query): + """ + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "common" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.CommonTermsQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class CombinedFields(Query): + """ + The `combined_fields` query supports searching multiple text fields as + if their contents had been indexed into one combined field. + + :arg fields: (required) List of fields to search. Field wildcard + patterns are allowed. Only `text` fields are supported, and they + must all have the same search `analyzer`. + :arg query: (required) Text to search for in the provided `fields`. + The `combined_fields` query analyzes the provided text before + performing a search. + :arg auto_generate_synonyms_phrase_query: If true, match phrase + queries are automatically created for multi-term synonyms. + Defaults to `True` if omitted. + :arg operator: Boolean logic used to interpret text in the query + value. Defaults to `or` if omitted. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. + :arg zero_terms_query: Indicates whether no documents are returned if + the analyzer removes all tokens, such as when using a `stop` + filter. Defaults to `none` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + name = "combined_fields" + + def __init__( + self, + *, + fields: Union[ + Sequence[Union[str, "InstrumentedField"]], "DefaultType" + ] = DEFAULT, + query: Union[str, "DefaultType"] = DEFAULT, + auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, + operator: Union[Literal["or", "and"], "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + zero_terms_query: Union[Literal["none", "all"], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + fields=fields, + query=query, + auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, + operator=operator, + minimum_should_match=minimum_should_match, + zero_terms_query=zero_terms_query, + boost=boost, + _name=_name, + **kwargs, + ) + + +class ConstantScore(Query): + """ + Wraps a filter query and returns every matching document with a + relevance score equal to the `boost` parameter value. + + :arg filter: (required) Filter query you wish to run. Any returned + documents must match this query. Filter queries do not calculate + relevance scores. To speed up performance, Elasticsearch + automatically caches frequently used filter queries. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "constant_score" + _param_defs = { + "filter": {"type": "query"}, + } + + def __init__( + self, + *, + filter: Union[Query, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(filter=filter, boost=boost, _name=_name, **kwargs) + + +class DisMax(Query): + """ + Returns documents matching one or more wrapped queries, called query + clauses or clauses. If a returned document matches multiple query + clauses, the `dis_max` query assigns the document the highest + relevance score from any matching clause, plus a tie breaking + increment for any additional matching subqueries. + + :arg queries: (required) One or more query clauses. Returned documents + must match one or more of these queries. If a document matches + multiple queries, Elasticsearch uses the highest relevance score. + :arg tie_breaker: Floating point number between 0 and 1.0 used to + increase the relevance scores of documents matching multiple query + clauses. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + name = "dis_max" + _param_defs = { + "queries": {"type": "query", "multi": True}, + } + + def __init__( + self, + *, + queries: Union[Sequence[Query], "DefaultType"] = DEFAULT, + tie_breaker: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + queries=queries, tie_breaker=tie_breaker, boost=boost, _name=_name, **kwargs + ) + + +class DistanceFeature(Query): + """ + Boosts the relevance score of documents closer to a provided origin + date or point. For example, you can use this query to give more weight + to documents closer to a certain date or location. + + :arg origin: (required) Date or point of origin used to calculate + distances. If the `field` value is a `date` or `date_nanos` field, + the `origin` value must be a date. Date Math, such as `now-1h`, is + supported. If the field value is a `geo_point` field, the `origin` + value must be a geopoint. + :arg pivot: (required) Distance from the `origin` at which relevance + scores receive half of the `boost` value. If the `field` value is + a `date` or `date_nanos` field, the `pivot` value must be a time + unit, such as `1h` or `10d`. If the `field` value is a `geo_point` + field, the `pivot` value must be a distance unit, such as `1km` or + `12m`. + :arg field: (required) Name of the field used to calculate distances. + This field must meet the following criteria: be a `date`, + `date_nanos` or `geo_point` field; have an `index` mapping + parameter value of `true`, which is the default; have an + `doc_values` mapping parameter value of `true`, which is the + default. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "distance_feature" + + def __init__( + self, + *, + origin: Any = DEFAULT, + pivot: Any = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + origin=origin, pivot=pivot, field=field, boost=boost, _name=_name, **kwargs + ) + + +class Exists(Query): + """ + Returns documents that contain an indexed value for a field. + + :arg field: (required) Name of the field you wish to search. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "exists" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(field=field, boost=boost, _name=_name, **kwargs) + + class FunctionScore(Query): + """ + The `function_score` enables you to modify the score of documents that + are retrieved by a query. + + :arg boost_mode: Defines how he newly computed score is combined with + the score of the query Defaults to `multiply` if omitted. 
+ :arg functions: One or more functions that compute a new score for + each document returned by the query. + :arg max_boost: Restricts the new score to not exceed the provided + limit. + :arg min_score: Excludes documents that do not meet the provided score + threshold. + :arg query: A query that determines the documents for which a new + score is computed. + :arg score_mode: Specifies how the computed scores are combined + Defaults to `multiply` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + name = "function_score" _param_defs = { "query": {"type": "query"}, @@ -298,258 +617,2034 @@ class FunctionScore(Query): "functions": {"type": "score_function", "multi": True}, } - def __init__(self, **kwargs: Any): - if "functions" in kwargs: - pass - else: - fns = kwargs["functions"] = [] + def __init__( + self, + *, + boost_mode: Union[ + Literal["multiply", "replace", "sum", "avg", "max", "min"], "DefaultType" + ] = DEFAULT, + functions: Union[ + Sequence["types.FunctionScoreContainer"], Dict[str, Any], "DefaultType" + ] = DEFAULT, + max_boost: Union[float, "DefaultType"] = DEFAULT, + min_score: Union[float, "DefaultType"] = DEFAULT, + query: Union[Query, "DefaultType"] = DEFAULT, + score_mode: Union[ + Literal["multiply", "sum", "avg", "first", "max", "min"], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if functions is DEFAULT: + functions = [] for name in ScoreFunction._classes: if name in kwargs: - fns.append({name: kwargs.pop(name)}) - super().__init__(**kwargs) + functions.append({name: kwargs.pop(name)}) # type: ignore + super().__init__( + boost_mode=boost_mode, + functions=functions, + max_boost=max_boost, + min_score=min_score, + query=query, + score_mode=score_mode, + boost=boost, + _name=_name, + **kwargs, + ) -# compound queries -class Boosting(Query): - name = "boosting" - _param_defs = {"positive": {"type": "query"}, "negative": {"type": "query"}} +class Fuzzy(Query): + """ + Returns documents that contain terms similar to the search term, as + measured by a Levenshtein edit distance. + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ -class ConstantScore(Query): - name = "constant_score" - _param_defs = {"query": {"type": "query"}, "filter": {"type": "query"}} + name = "fuzzy" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.FuzzyQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) -class DisMax(Query): - name = "dis_max" - _param_defs = {"queries": {"type": "query", "multi": True}} +class GeoBoundingBox(Query): + """ + Matches geo_point and geo_shape values that intersect a bounding box. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg type: + :arg validation_method: Set to `IGNORE_MALFORMED` to accept geo points + with invalid latitude or longitude. Set to `COERCE` to also try to + infer correct latitude or longitude. Defaults to `'strict'` if + omitted. 
+ :arg ignore_unmapped: Set to `true` to ignore an unmapped field and + not match any documents for this query. Set to `false` to throw an + exception if the field is not mapped. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ -class Filtered(Query): - name = "filtered" - _param_defs = {"query": {"type": "query"}, "filter": {"type": "query"}} + name = "geo_bounding_box" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.CoordsGeoBounds", + "types.TopLeftBottomRightGeoBounds", + "types.TopRightBottomLeftGeoBounds", + "types.WktGeoBounds", + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + *, + type: Union[Literal["memory", "indexed"], "DefaultType"] = DEFAULT, + validation_method: Union[ + Literal["coerce", "ignore_malformed", "strict"], "DefaultType" + ] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__( + type=type, + validation_method=validation_method, + ignore_unmapped=ignore_unmapped, + boost=boost, + _name=_name, + **kwargs, + ) -class Indices(Query): - name = "indices" - _param_defs = {"query": {"type": "query"}, "no_match_query": {"type": "query"}} +class GeoDistance(Query): + """ + Matches `geo_point` and `geo_shape` values within a given distance of + a geopoint. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg distance: (required) The radius of the circle centred on the + specified location. Points which fall into this circle are + considered to be matches. + :arg distance_type: How to compute the distance. Set to `plane` for a + faster calculation that's inaccurate on long distances and close + to the poles. Defaults to `'arc'` if omitted. + :arg validation_method: Set to `IGNORE_MALFORMED` to accept geo points + with invalid latitude or longitude. Set to `COERCE` to also try to + infer correct latitude or longitude. Defaults to `'strict'` if + omitted. + :arg ignore_unmapped: Set to `true` to ignore an unmapped field and + not match any documents for this query. Set to `false` to throw an + exception if the field is not mapped. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ -class Percolate(Query): - name = "percolate" + name = "geo_distance" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.LatLonGeoLocation", + "types.GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + *, + distance: Union[str, "DefaultType"] = DEFAULT, + distance_type: Union[Literal["arc", "plane"], "DefaultType"] = DEFAULT, + validation_method: Union[ + Literal["coerce", "ignore_malformed", "strict"], "DefaultType" + ] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__( + distance=distance, + distance_type=distance_type, + validation_method=validation_method, + ignore_unmapped=ignore_unmapped, + boost=boost, + _name=_name, + **kwargs, + ) -# relationship queries -class Nested(Query): - name = "nested" - _param_defs = {"query": {"type": "query"}} +class GeoPolygon(Query): + """ + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg validation_method: Defaults to `'strict'` if omitted. + :arg ignore_unmapped: + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ -class HasChild(Query): - name = "has_child" - _param_defs = {"query": {"type": "query"}} + name = "geo_polygon" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.GeoPolygonPoints", Dict[str, Any], "DefaultType" + ] = DEFAULT, + *, + validation_method: Union[ + Literal["coerce", "ignore_malformed", "strict"], "DefaultType" + ] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__( + validation_method=validation_method, + ignore_unmapped=ignore_unmapped, + boost=boost, + _name=_name, + **kwargs, + ) -class HasParent(Query): - name = "has_parent" - _param_defs = {"query": {"type": "query"}} +class GeoShape(Query): + """ + Filter documents indexed using either the `geo_shape` or the + `geo_point` type. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg ignore_unmapped: Set to `true` to ignore an unmapped field and + not match any documents for this query. Set to `false` to throw an + exception if the field is not mapped. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ -class TopChildren(Query): - name = "top_children" - _param_defs = {"query": {"type": "query"}} + name = "geo_shape" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.GeoShapeFieldQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + *, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__( + ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs + ) -# compound span queries -class SpanFirst(Query): - name = "span_first" - _param_defs = {"match": {"type": "query"}} +class HasChild(Query): + """ + Returns parent documents whose joined child documents match a provided + query. + + :arg query: (required) Query you wish to run on child documents of the + `type` field. If a child document matches the search, the query + returns the parent document. + :arg type: (required) Name of the child relationship mapped for the + `join` field. + :arg ignore_unmapped: Indicates whether to ignore an unmapped `type` + and not return any documents instead of an error. + :arg inner_hits: If defined, each search hit will contain inner hits. + :arg max_children: Maximum number of child documents that match the + query allowed for a returned parent document. If the parent + document exceeds this limit, it is excluded from the search + results. + :arg min_children: Minimum number of child documents that match the + query required to match the query for a returned parent document. + If the parent document does not meet this limit, it is excluded + from the search results. + :arg score_mode: Indicates how scores for matching child documents + affect the root parent document’s relevance score. Defaults to + `'none'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ -class SpanMulti(Query): - name = "span_multi" - _param_defs = {"match": {"type": "query"}} + name = "has_child" + _param_defs = { + "query": {"type": "query"}, + } + def __init__( + self, + *, + query: Union[Query, "DefaultType"] = DEFAULT, + type: Union[str, "DefaultType"] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT, + max_children: Union[int, "DefaultType"] = DEFAULT, + min_children: Union[int, "DefaultType"] = DEFAULT, + score_mode: Union[ + Literal["none", "avg", "sum", "max", "min"], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + query=query, + type=type, + ignore_unmapped=ignore_unmapped, + inner_hits=inner_hits, + max_children=max_children, + min_children=min_children, + score_mode=score_mode, + boost=boost, + _name=_name, + **kwargs, + ) -class SpanNear(Query): - name = "span_near" - _param_defs = {"clauses": {"type": "query", "multi": True}} +class HasParent(Query): + """ + Returns child documents whose joined parent document matches a + provided query. + + :arg parent_type: (required) Name of the parent relationship mapped + for the `join` field. 
+ :arg query: (required) Query you wish to run on parent documents of + the `parent_type` field. If a parent document matches the search, + the query returns its child documents. + :arg ignore_unmapped: Indicates whether to ignore an unmapped + `parent_type` and not return any documents instead of an error. + You can use this parameter to query multiple indices that may not + contain the `parent_type`. + :arg inner_hits: If defined, each search hit will contain inner hits. + :arg score: Indicates whether the relevance score of a matching parent + document is aggregated into its child documents. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ -class SpanNot(Query): - name = "span_not" - _param_defs = {"exclude": {"type": "query"}, "include": {"type": "query"}} + name = "has_parent" + _param_defs = { + "query": {"type": "query"}, + } + def __init__( + self, + *, + parent_type: Union[str, "DefaultType"] = DEFAULT, + query: Union[Query, "DefaultType"] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT, + score: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + parent_type=parent_type, + query=query, + ignore_unmapped=ignore_unmapped, + inner_hits=inner_hits, + score=score, + boost=boost, + _name=_name, + **kwargs, + ) -class SpanOr(Query): - name = "span_or" - _param_defs = {"clauses": {"type": "query", "multi": True}} +class Ids(Query): + """ + Returns documents based on their IDs. This query uses document IDs + stored in the `_id` field. + + :arg values: An array of document IDs. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ -class FieldMaskingSpan(Query): - name = "field_masking_span" - _param_defs = {"query": {"type": "query"}} + name = "ids" + def __init__( + self, + *, + values: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(values=values, boost=boost, _name=_name, **kwargs) -class SpanContaining(Query): - name = "span_containing" - _param_defs = {"little": {"type": "query"}, "big": {"type": "query"}} +class Intervals(Query): + """ + Returns documents based on the order and proximity of matching terms. -# Original implementation contained -# a typo: remove in v8.0. -SpanContainining = SpanContaining + :arg _field: The field to use in this query. + :arg _value: The query value for the field. 
+ """ + name = "intervals" -class SpanWithin(Query): - name = "span_within" - _param_defs = {"little": {"type": "query"}, "big": {"type": "query"}} + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.IntervalsQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) -# core queries -class CombinedFields(Query): - name = "combined_fields" +class Knn(Query): + """ + Finds the k nearest vectors to a query vector, as measured by a + similarity metric. knn query finds nearest vectors through approximate + search on indexed dense_vectors. + + :arg field: (required) The name of the vector field to search against + :arg query_vector: The query vector + :arg query_vector_builder: The query vector builder. You must provide + a query_vector_builder or query_vector, but not both. + :arg num_candidates: The number of nearest neighbor candidates to + consider per shard + :arg k: The final number of nearest neighbors to return as top hits + :arg filter: Filters for the kNN search query + :arg similarity: The minimum similarity for a vector to be considered + a match + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + name = "knn" + _param_defs = { + "filter": {"type": "query", "multi": True}, + } -class Common(Query): - name = "common" + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + query_vector: Union[Sequence[float], "DefaultType"] = DEFAULT, + query_vector_builder: Union[ + "types.QueryVectorBuilder", Dict[str, Any], "DefaultType" + ] = DEFAULT, + num_candidates: Union[int, "DefaultType"] = DEFAULT, + k: Union[int, "DefaultType"] = DEFAULT, + filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, + similarity: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + query_vector=query_vector, + query_vector_builder=query_vector_builder, + num_candidates=num_candidates, + k=k, + filter=filter, + similarity=similarity, + boost=boost, + _name=_name, + **kwargs, + ) -class Fuzzy(Query): - name = "fuzzy" +class Match(Query): + """ + Returns documents that match a provided text, number, date or boolean + value. The provided text is analyzed before matching. + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ -class FuzzyLikeThis(Query): - name = "fuzzy_like_this" + name = "match" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.MatchQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) -class FuzzyLikeThisField(Query): - name = "fuzzy_like_this_field" +class MatchAll(Query): + """ + Matches all documents, giving them all a `_score` of 1.0. -class RankFeature(Query): - name = "rank_feature" + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. 
A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + name = "match_all" -class DistanceFeature(Query): - name = "distance_feature" + def __init__( + self, + *, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(boost=boost, _name=_name, **kwargs) + def __add__(self, other: "Query") -> "Query": + return other._clone() -class GeoBoundingBox(Query): - name = "geo_bounding_box" + __and__ = __rand__ = __radd__ = __add__ + def __or__(self, other: "Query") -> "MatchAll": + return self -class GeoDistance(Query): - name = "geo_distance" + __ror__ = __or__ + def __invert__(self) -> "MatchNone": + return MatchNone() -class GeoDistanceRange(Query): - name = "geo_distance_range" +EMPTY_QUERY = MatchAll() -class GeoPolygon(Query): - name = "geo_polygon" +class MatchBoolPrefix(Query): + """ + Analyzes its input and constructs a `bool` query from the terms. Each + term except the last is used in a `term` query. The last term is used + in a prefix query. -class GeoShape(Query): - name = "geo_shape" + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + name = "match_bool_prefix" -class GeohashCell(Query): - name = "geohash_cell" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.MatchBoolPrefixQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) -class Ids(Query): - name = "ids" +class MatchNone(Query): + """ + Matches no documents. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ -class Intervals(Query): - name = "intervals" + name = "match_none" + def __init__( + self, + *, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(boost=boost, _name=_name, **kwargs) -class Knn(Query): - name = "knn" + def __add__(self, other: "Query") -> "MatchNone": + return self + __and__ = __rand__ = __radd__ = __add__ -class Limit(Query): - name = "limit" + def __or__(self, other: "Query") -> "Query": + return other._clone() + __ror__ = __or__ -class Match(Query): - name = "match" + def __invert__(self) -> MatchAll: + return MatchAll() class MatchPhrase(Query): + """ + Analyzes the text and creates a phrase query out of the analyzed text. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + name = "match_phrase" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.MatchPhraseQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) -class MatchPhrasePrefix(Query): - name = "match_phrase_prefix" +class MatchPhrasePrefix(Query): + """ + Returns documents that contain the words of a provided text, in the + same order as provided. The last term of the provided text is treated + as a prefix, matching any words that begin with that term. 
-class MatchBoolPrefix(Query): - name = "match_bool_prefix" + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + name = "match_phrase_prefix" -class Exists(Query): - name = "exists" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.MatchPhrasePrefixQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) class MoreLikeThis(Query): - name = "more_like_this" + """ + Returns documents that are "like" a given set of documents. + + :arg like: (required) Specifies free form text and/or a single or + multiple documents for which you want to find similar documents. + :arg analyzer: The analyzer that is used to analyze the free form + text. Defaults to the analyzer associated with the first field in + fields. + :arg boost_terms: Each term in the formed query could be further + boosted by their tf-idf score. This sets the boost factor to use + when using this feature. Defaults to deactivated (0). + :arg fail_on_unsupported_field: Controls whether the query should fail + (throw an exception) if any of the specified fields are not of the + supported types (`text` or `keyword`). Defaults to `True` if + omitted. + :arg fields: A list of fields to fetch and analyze the text from. + Defaults to the `index.query.default_field` index setting, which + has a default value of `*`. + :arg include: Specifies whether the input documents should also be + included in the search results returned. + :arg max_doc_freq: The maximum document frequency above which the + terms are ignored from the input document. + :arg max_query_terms: The maximum number of query terms that can be + selected. Defaults to `25` if omitted. + :arg max_word_length: The maximum word length above which the terms + are ignored. Defaults to unbounded (`0`). + :arg min_doc_freq: The minimum document frequency below which the + terms are ignored from the input document. Defaults to `5` if + omitted. + :arg minimum_should_match: After the disjunctive query has been + formed, this parameter controls the number of terms that must + match. + :arg min_term_freq: The minimum term frequency below which the terms + are ignored from the input document. Defaults to `2` if omitted. + :arg min_word_length: The minimum word length below which the terms + are ignored. + :arg routing: + :arg stop_words: An array of stop words. Any word in this set is + ignored. + :arg unlike: Used in combination with `like` to exclude documents that + match a set of terms. + :arg version: + :arg version_type: Defaults to `'internal'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + name = "more_like_this" -class MoreLikeThisField(Query): - name = "more_like_this_field" + def __init__( + self, + *, + like: Union[ + Union[str, "types.LikeDocument"], + Sequence[Union[str, "types.LikeDocument"]], + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + analyzer: Union[str, "DefaultType"] = DEFAULT, + boost_terms: Union[float, "DefaultType"] = DEFAULT, + fail_on_unsupported_field: Union[bool, "DefaultType"] = DEFAULT, + fields: Union[ + Sequence[Union[str, "InstrumentedField"]], "DefaultType" + ] = DEFAULT, + include: Union[bool, "DefaultType"] = DEFAULT, + max_doc_freq: Union[int, "DefaultType"] = DEFAULT, + max_query_terms: Union[int, "DefaultType"] = DEFAULT, + max_word_length: Union[int, "DefaultType"] = DEFAULT, + min_doc_freq: Union[int, "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + min_term_freq: Union[int, "DefaultType"] = DEFAULT, + min_word_length: Union[int, "DefaultType"] = DEFAULT, + routing: Union[str, "DefaultType"] = DEFAULT, + stop_words: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + unlike: Union[ + Union[str, "types.LikeDocument"], + Sequence[Union[str, "types.LikeDocument"]], + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + version: Union[int, "DefaultType"] = DEFAULT, + version_type: Union[ + Literal["internal", "external", "external_gte", "force"], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + like=like, + analyzer=analyzer, + boost_terms=boost_terms, + fail_on_unsupported_field=fail_on_unsupported_field, + fields=fields, + include=include, + max_doc_freq=max_doc_freq, + max_query_terms=max_query_terms, + max_word_length=max_word_length, + min_doc_freq=min_doc_freq, + minimum_should_match=minimum_should_match, + min_term_freq=min_term_freq, + min_word_length=min_word_length, + routing=routing, + stop_words=stop_words, + unlike=unlike, + version=version, + version_type=version_type, + boost=boost, + _name=_name, + **kwargs, + ) class MultiMatch(Query): + """ + Enables you to search for a provided text, number, date or boolean + value across multiple fields. The provided text is analyzed before + matching. + + :arg query: (required) Text, number, boolean value or date you wish to + find in the provided field. + :arg analyzer: Analyzer used to convert the text in the query value + into tokens. + :arg auto_generate_synonyms_phrase_query: If `true`, match phrase + queries are automatically created for multi-term synonyms. + Defaults to `True` if omitted. + :arg cutoff_frequency: + :arg fields: The fields to be queried. Defaults to the + `index.query.default_field` index settings, which in turn defaults + to `*`. + :arg fuzziness: Maximum edit distance allowed for matching. + :arg fuzzy_rewrite: Method used to rewrite the query. + :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include + transpositions of two adjacent characters (for example, `ab` to + `ba`). Can be applied to the term subqueries constructed for all + terms but the final term. Defaults to `True` if omitted. + :arg lenient: If `true`, format-based errors, such as providing a text + query value for a numeric field, are ignored. + :arg max_expansions: Maximum number of terms to which the query will + expand. Defaults to `50` if omitted. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. 
+ :arg operator: Boolean logic used to interpret text in the query + value. Defaults to `'or'` if omitted. + :arg prefix_length: Number of beginning characters left unchanged for + fuzzy matching. + :arg slop: Maximum number of positions allowed between matching + tokens. + :arg tie_breaker: Determines how scores for each per-term blended + query and scores across groups are combined. + :arg type: How `the` multi_match query is executed internally. + Defaults to `'best_fields'` if omitted. + :arg zero_terms_query: Indicates whether no documents are returned if + the `analyzer` removes all tokens, such as when using a `stop` + filter. Defaults to `'none'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + name = "multi_match" + def __init__( + self, + *, + query: Union[str, "DefaultType"] = DEFAULT, + analyzer: Union[str, "DefaultType"] = DEFAULT, + auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, + cutoff_frequency: Union[float, "DefaultType"] = DEFAULT, + fields: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + fuzziness: Union[str, int, "DefaultType"] = DEFAULT, + fuzzy_rewrite: Union[str, "DefaultType"] = DEFAULT, + fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT, + lenient: Union[bool, "DefaultType"] = DEFAULT, + max_expansions: Union[int, "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT, + prefix_length: Union[int, "DefaultType"] = DEFAULT, + slop: Union[int, "DefaultType"] = DEFAULT, + tie_breaker: Union[float, "DefaultType"] = DEFAULT, + type: Union[ + Literal[ + "best_fields", + "most_fields", + "cross_fields", + "phrase", + "phrase_prefix", + "bool_prefix", + ], + "DefaultType", + ] = DEFAULT, + zero_terms_query: Union[Literal["all", "none"], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + query=query, + analyzer=analyzer, + auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, + cutoff_frequency=cutoff_frequency, + fields=fields, + fuzziness=fuzziness, + fuzzy_rewrite=fuzzy_rewrite, + fuzzy_transpositions=fuzzy_transpositions, + lenient=lenient, + max_expansions=max_expansions, + minimum_should_match=minimum_should_match, + operator=operator, + prefix_length=prefix_length, + slop=slop, + tie_breaker=tie_breaker, + type=type, + zero_terms_query=zero_terms_query, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Nested(Query): + """ + Wraps another query to search nested fields. If an object matches the + search, the nested query returns the root parent document. + + :arg path: (required) Path to the nested object you wish to search. + :arg query: (required) Query you wish to run on nested objects in the + path. + :arg ignore_unmapped: Indicates whether to ignore an unmapped path and + not return any documents instead of an error. + :arg inner_hits: If defined, each search hit will contain inner hits. + :arg score_mode: How scores for matching child objects affect the root + parent document’s relevance score. Defaults to `'avg'` if omitted. 
+ :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "nested" + _param_defs = { + "query": {"type": "query"}, + } + + def __init__( + self, + *, + path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + query: Union[Query, "DefaultType"] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT, + score_mode: Union[ + Literal["none", "avg", "sum", "max", "min"], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + path=path, + query=query, + ignore_unmapped=ignore_unmapped, + inner_hits=inner_hits, + score_mode=score_mode, + boost=boost, + _name=_name, + **kwargs, + ) + + +class ParentId(Query): + """ + Returns child documents joined to a specific parent document. + + :arg id: ID of the parent document. + :arg ignore_unmapped: Indicates whether to ignore an unmapped `type` + and not return any documents instead of an error. + :arg type: Name of the child relationship mapped for the `join` field. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "parent_id" + + def __init__( + self, + *, + id: Union[str, "DefaultType"] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + type: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + id=id, + ignore_unmapped=ignore_unmapped, + type=type, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Percolate(Query): + """ + Matches queries stored in an index. + + :arg field: (required) Field that holds the indexed queries. The field + must use the `percolator` mapping type. + :arg document: The source of the document being percolated. + :arg documents: An array of sources of the documents being percolated. + :arg id: The ID of a stored document to percolate. + :arg index: The index of a stored document to percolate. + :arg name: The suffix used for the `_percolator_document_slot` field + when multiple `percolate` queries are specified. + :arg preference: Preference used to fetch document to percolate. + :arg routing: Routing used to fetch document to percolate. + :arg version: The expected version of a stored document to percolate. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + name = "percolate" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + document: Any = DEFAULT, + documents: Union[Sequence[Any], "DefaultType"] = DEFAULT, + id: Union[str, "DefaultType"] = DEFAULT, + index: Union[str, "DefaultType"] = DEFAULT, + name: Union[str, "DefaultType"] = DEFAULT, + preference: Union[str, "DefaultType"] = DEFAULT, + routing: Union[str, "DefaultType"] = DEFAULT, + version: Union[int, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + document=document, + documents=documents, + id=id, + index=index, + name=name, + preference=preference, + routing=routing, + version=version, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Pinned(Query): + """ + Promotes selected documents to rank higher than those matching a given + query. + + :arg organic: (required) Any choice of query used to rank documents + which will be ranked below the "pinned" documents. + :arg ids: Document IDs listed in the order they are to appear in + results. Required if `docs` is not specified. + :arg docs: Documents listed in the order they are to appear in + results. Required if `ids` is not specified. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "pinned" + _param_defs = { + "organic": {"type": "query"}, + } + + def __init__( + self, + *, + organic: Union[Query, "DefaultType"] = DEFAULT, + ids: Union[Sequence[str], "DefaultType"] = DEFAULT, + docs: Union[ + Sequence["types.PinnedDoc"], Dict[str, Any], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + organic=organic, ids=ids, docs=docs, boost=boost, _name=_name, **kwargs + ) + class Prefix(Query): + """ + Returns documents that contain a specific prefix in a provided field. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + name = "prefix" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.PrefixQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + class QueryString(Query): + """ + Returns documents based on a provided query string, using a parser + with a strict syntax. + + :arg query: (required) Query string you wish to parse and use for + search. + :arg allow_leading_wildcard: If `true`, the wildcard characters `*` + and `?` are allowed as the first character of the query string. + Defaults to `True` if omitted. + :arg analyzer: Analyzer used to convert text in the query string into + tokens. + :arg analyze_wildcard: If `true`, the query attempts to analyze + wildcard terms in the query string. + :arg auto_generate_synonyms_phrase_query: If `true`, match phrase + queries are automatically created for multi-term synonyms. + Defaults to `True` if omitted. + :arg default_field: Default field to search if no field is provided in + the query string. Supports wildcards (`*`). 
Defaults to the + `index.query.default_field` index setting, which has a default + value of `*`. + :arg default_operator: Default boolean logic used to interpret text in + the query string if no operators are specified. Defaults to `'or'` + if omitted. + :arg enable_position_increments: If `true`, enable position increments + in queries constructed from a `query_string` search. Defaults to + `True` if omitted. + :arg escape: + :arg fields: Array of fields to search. Supports wildcards (`*`). + :arg fuzziness: Maximum edit distance allowed for fuzzy matching. + :arg fuzzy_max_expansions: Maximum number of terms to which the query + expands for fuzzy matching. Defaults to `50` if omitted. + :arg fuzzy_prefix_length: Number of beginning characters left + unchanged for fuzzy matching. + :arg fuzzy_rewrite: Method used to rewrite the query. + :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include + transpositions of two adjacent characters (for example, `ab` to + `ba`). Defaults to `True` if omitted. + :arg lenient: If `true`, format-based errors, such as providing a text + value for a numeric field, are ignored. + :arg max_determinized_states: Maximum number of automaton states + required for the query. Defaults to `10000` if omitted. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. + :arg phrase_slop: Maximum number of positions allowed between matching + tokens for phrases. + :arg quote_analyzer: Analyzer used to convert quoted text in the query + string into tokens. For quoted text, this parameter overrides the + analyzer specified in the `analyzer` parameter. + :arg quote_field_suffix: Suffix appended to quoted text in the query + string. You can use this suffix to use a different analysis method + for exact matches. + :arg rewrite: Method used to rewrite the query. + :arg tie_breaker: How to combine the queries generated from the + individual search terms in the resulting `dis_max` query. + :arg time_zone: Coordinated Universal Time (UTC) offset or IANA time + zone used to convert date values in the query string to UTC. + :arg type: Determines how the query matches and scores documents. + Defaults to `'best_fields'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
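As a usage sketch for the Pinned and Prefix classes above (the IDs and field names are illustrative):

    from elasticsearch_dsl.query import Pinned, Prefix, Term

    Prefix("user.name", {"value": "ki"})   # {"prefix": {"user.name": {"value": "ki"}}}
    Pinned(ids=["1", "4", "100"], organic=Term("category", "books"))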
+ :arg _name: + """ + name = "query_string" + def __init__( + self, + *, + query: Union[str, "DefaultType"] = DEFAULT, + allow_leading_wildcard: Union[bool, "DefaultType"] = DEFAULT, + analyzer: Union[str, "DefaultType"] = DEFAULT, + analyze_wildcard: Union[bool, "DefaultType"] = DEFAULT, + auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, + default_field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + default_operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT, + enable_position_increments: Union[bool, "DefaultType"] = DEFAULT, + escape: Union[bool, "DefaultType"] = DEFAULT, + fields: Union[ + Sequence[Union[str, "InstrumentedField"]], "DefaultType" + ] = DEFAULT, + fuzziness: Union[str, int, "DefaultType"] = DEFAULT, + fuzzy_max_expansions: Union[int, "DefaultType"] = DEFAULT, + fuzzy_prefix_length: Union[int, "DefaultType"] = DEFAULT, + fuzzy_rewrite: Union[str, "DefaultType"] = DEFAULT, + fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT, + lenient: Union[bool, "DefaultType"] = DEFAULT, + max_determinized_states: Union[int, "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + phrase_slop: Union[float, "DefaultType"] = DEFAULT, + quote_analyzer: Union[str, "DefaultType"] = DEFAULT, + quote_field_suffix: Union[str, "DefaultType"] = DEFAULT, + rewrite: Union[str, "DefaultType"] = DEFAULT, + tie_breaker: Union[float, "DefaultType"] = DEFAULT, + time_zone: Union[str, "DefaultType"] = DEFAULT, + type: Union[ + Literal[ + "best_fields", + "most_fields", + "cross_fields", + "phrase", + "phrase_prefix", + "bool_prefix", + ], + "DefaultType", + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + query=query, + allow_leading_wildcard=allow_leading_wildcard, + analyzer=analyzer, + analyze_wildcard=analyze_wildcard, + auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, + default_field=default_field, + default_operator=default_operator, + enable_position_increments=enable_position_increments, + escape=escape, + fields=fields, + fuzziness=fuzziness, + fuzzy_max_expansions=fuzzy_max_expansions, + fuzzy_prefix_length=fuzzy_prefix_length, + fuzzy_rewrite=fuzzy_rewrite, + fuzzy_transpositions=fuzzy_transpositions, + lenient=lenient, + max_determinized_states=max_determinized_states, + minimum_should_match=minimum_should_match, + phrase_slop=phrase_slop, + quote_analyzer=quote_analyzer, + quote_field_suffix=quote_field_suffix, + rewrite=rewrite, + tie_breaker=tie_breaker, + time_zone=time_zone, + type=type, + boost=boost, + _name=_name, + **kwargs, + ) + class Range(Query): + """ + Returns documents that contain terms within a provided range. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + name = "range" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["wrappers.Range[Any]", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class RankFeature(Query): + """ + Boosts the relevance score of documents based on the numeric value of + a `rank_feature` or `rank_features` field. + + :arg field: (required) `rank_feature` or `rank_features` field used to + boost relevance scores. 
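A short sketch of the two ways the Range value can be supplied, either as a plain dict or as a `wrappers.Range` object (the `age` field is illustrative):

    from elasticsearch_dsl import wrappers
    from elasticsearch_dsl.query import Range

    Range("age", {"gte": 10, "lt": 20})
    Range("age", wrappers.Range(gte=10, lt=20))   # equivalent, using the typed wrapper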
+ :arg saturation: Saturation function used to boost relevance scores + based on the value of the rank feature `field`. + :arg log: Logarithmic function used to boost relevance scores based on + the value of the rank feature `field`. + :arg linear: Linear function used to boost relevance scores based on + the value of the rank feature `field`. + :arg sigmoid: Sigmoid function used to boost relevance scores based on + the value of the rank feature `field`. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "rank_feature" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + saturation: Union[ + "types.RankFeatureFunctionSaturation", Dict[str, Any], "DefaultType" + ] = DEFAULT, + log: Union[ + "types.RankFeatureFunctionLogarithm", Dict[str, Any], "DefaultType" + ] = DEFAULT, + linear: Union[ + "types.RankFeatureFunctionLinear", Dict[str, Any], "DefaultType" + ] = DEFAULT, + sigmoid: Union[ + "types.RankFeatureFunctionSigmoid", Dict[str, Any], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + saturation=saturation, + log=log, + linear=linear, + sigmoid=sigmoid, + boost=boost, + _name=_name, + **kwargs, + ) + class Regexp(Query): + """ + Returns documents that contain terms matching a regular expression. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + name = "regexp" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.RegexpQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class Rule(Query): + """ + :arg organic: (required) + :arg ruleset_ids: (required) + :arg match_criteria: (required) + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "rule" + _param_defs = { + "organic": {"type": "query"}, + } + + def __init__( + self, + *, + organic: Union[Query, "DefaultType"] = DEFAULT, + ruleset_ids: Union[Sequence[str], "DefaultType"] = DEFAULT, + match_criteria: Any = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + organic=organic, + ruleset_ids=ruleset_ids, + match_criteria=match_criteria, + boost=boost, + _name=_name, + **kwargs, + ) -class Shape(Query): - name = "shape" + +class Script(Query): + """ + Filters documents based on a provided script. The script query is + typically used in a filter context. + + :arg script: (required) Contains a script to run as a query. This + script must return a boolean value, `true` or `false`. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. 
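For example, a RankFeature query with a saturation function could be written as follows (the `pagerank` field and pivot value are illustrative):

    from elasticsearch_dsl.query import RankFeature

    RankFeature(field="pagerank", saturation={"pivot": 8})
    # roughly: {"rank_feature": {"field": "pagerank", "saturation": {"pivot": 8}}}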
A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "script" + + def __init__( + self, + *, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(script=script, boost=boost, _name=_name, **kwargs) + + +class ScriptScore(Query): + """ + Uses a script to provide a custom score for returned documents. + + :arg query: (required) Query used to return documents. + :arg script: (required) Script used to compute the score of documents + returned by the query. Important: final relevance scores from the + `script_score` query cannot be negative. + :arg min_score: Documents with a score lower than this floating point + number are excluded from the search results. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "script_score" + _param_defs = { + "query": {"type": "query"}, + } + + def __init__( + self, + *, + query: Union[Query, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + min_score: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + query=query, + script=script, + min_score=min_score, + boost=boost, + _name=_name, + **kwargs, + ) class Semantic(Query): + """ + A semantic query to semantic_text field types + + :arg field: (required) The field to query, which must be a + semantic_text field type + :arg query: (required) The query text + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + name = "semantic" + def __init__( + self, + *, + field: Union[str, "DefaultType"] = DEFAULT, + query: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(field=field, query=query, boost=boost, _name=_name, **kwargs) + + +class Shape(Query): + """ + Queries documents that contain fields indexed using the `shape` type. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg ignore_unmapped: When set to `true` the query ignores an unmapped + field and will not match any documents. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
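A minimal sketch of the Script and ScriptScore queries defined above, passing the script as a plain dict (field names and script contents are illustrative):

    from elasticsearch_dsl.query import Script, ScriptScore, Term

    Script(script={"source": "doc['num_likes'].value > params.threshold",
                   "params": {"threshold": 10}})
    ScriptScore(
        query=Term("category", "books"),
        script={"source": "doc['rating'].value * params.factor",
                "params": {"factor": 1.2}},
        min_score=2.0,
    )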
+ :arg _name: + """ + + name = "shape" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.ShapeFieldQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + *, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__( + ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs + ) + class SimpleQueryString(Query): + """ + Returns documents based on a provided query string, using a parser + with a limited but fault-tolerant syntax. + + :arg query: (required) Query string in the simple query string syntax + you wish to parse and use for search. + :arg analyzer: Analyzer used to convert text in the query string into + tokens. + :arg analyze_wildcard: If `true`, the query attempts to analyze + wildcard terms in the query string. + :arg auto_generate_synonyms_phrase_query: If `true`, the parser + creates a match_phrase query for each multi-position token. + Defaults to `True` if omitted. + :arg default_operator: Default boolean logic used to interpret text in + the query string if no operators are specified. Defaults to `'or'` + if omitted. + :arg fields: Array of fields you wish to search. Accepts wildcard + expressions. You also can boost relevance scores for matches to + particular fields using a caret (`^`) notation. Defaults to the + `index.query.default_field index` setting, which has a default + value of `*`. + :arg flags: List of enabled operators for the simple query string + syntax. Defaults to `ALL` if omitted. + :arg fuzzy_max_expansions: Maximum number of terms to which the query + expands for fuzzy matching. Defaults to `50` if omitted. + :arg fuzzy_prefix_length: Number of beginning characters left + unchanged for fuzzy matching. + :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include + transpositions of two adjacent characters (for example, `ab` to + `ba`). + :arg lenient: If `true`, format-based errors, such as providing a text + value for a numeric field, are ignored. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. + :arg quote_field_suffix: Suffix appended to quoted text in the query + string. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + name = "simple_query_string" + def __init__( + self, + *, + query: Union[str, "DefaultType"] = DEFAULT, + analyzer: Union[str, "DefaultType"] = DEFAULT, + analyze_wildcard: Union[bool, "DefaultType"] = DEFAULT, + auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, + default_operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT, + fields: Union[ + Sequence[Union[str, "InstrumentedField"]], "DefaultType" + ] = DEFAULT, + flags: Union[ + "types.PipeSeparatedFlags", Dict[str, Any], "DefaultType" + ] = DEFAULT, + fuzzy_max_expansions: Union[int, "DefaultType"] = DEFAULT, + fuzzy_prefix_length: Union[int, "DefaultType"] = DEFAULT, + fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT, + lenient: Union[bool, "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + quote_field_suffix: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + query=query, + analyzer=analyzer, + analyze_wildcard=analyze_wildcard, + auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, + default_operator=default_operator, + fields=fields, + flags=flags, + fuzzy_max_expansions=fuzzy_max_expansions, + fuzzy_prefix_length=fuzzy_prefix_length, + fuzzy_transpositions=fuzzy_transpositions, + lenient=lenient, + minimum_should_match=minimum_should_match, + quote_field_suffix=quote_field_suffix, + boost=boost, + _name=_name, + **kwargs, + ) + + +class SpanContaining(Query): + """ + Returns matches which enclose another span query. + + :arg big: (required) Can be any span query. Matching spans from `big` + that contain matches from `little` are returned. + :arg little: (required) Can be any span query. Matching spans from + `big` that contain matches from `little` are returned. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_containing" + + def __init__( + self, + *, + big: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + little: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(big=big, little=little, boost=boost, _name=_name, **kwargs) + + +class SpanFieldMasking(Query): + """ + Wrapper to allow span queries to participate in composite single-field + span queries by _lying_ about their search field. + + :arg field: (required) + :arg query: (required) + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
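For instance, a SimpleQueryString query across several fields might look like this (field names and boosts are illustrative):

    from elasticsearch_dsl.query import SimpleQueryString

    SimpleQueryString(
        query='"fried eggs" +(eggplant | potato) -frittata',
        fields=["title^5", "body"],
        default_operator="and",
    )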
+ :arg _name: + """ + + name = "span_field_masking" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + query: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(field=field, query=query, boost=boost, _name=_name, **kwargs) + + +class SpanFirst(Query): + """ + Matches spans near the beginning of a field. + + :arg end: (required) Controls the maximum end position permitted in a + match. + :arg match: (required) Can be any other span type query. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_first" + + def __init__( + self, + *, + end: Union[int, "DefaultType"] = DEFAULT, + match: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(end=end, match=match, boost=boost, _name=_name, **kwargs) + + +class SpanMulti(Query): + """ + Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, + `prefix`, `range`, or `regexp` query) as a `span` query, so it can be + nested. + + :arg match: (required) Should be a multi term query (one of + `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_multi" + _param_defs = { + "match": {"type": "query"}, + } + + def __init__( + self, + *, + match: Union[Query, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(match=match, boost=boost, _name=_name, **kwargs) + + +class SpanNear(Query): + """ + Matches spans which are near one another. You can specify `slop`, the + maximum number of intervening unmatched positions, as well as whether + matches are required to be in-order. + + :arg clauses: (required) Array of one or more other span type queries. + :arg in_order: Controls whether matches are required to be in-order. + :arg slop: Controls the maximum number of intervening unmatched + positions permitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + name = "span_near" + + def __init__( + self, + *, + clauses: Union[ + Sequence["types.SpanQuery"], Dict[str, Any], "DefaultType" + ] = DEFAULT, + in_order: Union[bool, "DefaultType"] = DEFAULT, + slop: Union[int, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + clauses=clauses, + in_order=in_order, + slop=slop, + boost=boost, + _name=_name, + **kwargs, + ) + + +class SpanNot(Query): + """ + Removes matches which overlap with another span query or which are + within x tokens before (controlled by the parameter `pre`) or y tokens + after (controlled by the parameter `post`) another span query. + + :arg exclude: (required) Span query whose matches must not overlap + those returned. + :arg include: (required) Span query whose matches are filtered. + :arg dist: The number of tokens from within the include span that + can’t have overlap with the exclude span. Equivalent to setting + both `pre` and `post`. + :arg post: The number of tokens after the include span that can’t have + overlap with the exclude span. + :arg pre: The number of tokens before the include span that can’t have + overlap with the exclude span. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_not" + + def __init__( + self, + *, + exclude: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + include: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + dist: Union[int, "DefaultType"] = DEFAULT, + post: Union[int, "DefaultType"] = DEFAULT, + pre: Union[int, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + exclude=exclude, + include=include, + dist=dist, + post=post, + pre=pre, + boost=boost, + _name=_name, + **kwargs, + ) + + +class SpanOr(Query): + """ + Matches the union of its span clauses. + + :arg clauses: (required) Array of one or more other span type queries. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_or" + + def __init__( + self, + *, + clauses: Union[ + Sequence["types.SpanQuery"], Dict[str, Any], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(clauses=clauses, boost=boost, _name=_name, **kwargs) + class SpanTerm(Query): + """ + Matches spans containing a term. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. 
+ """ + name = "span_term" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.SpanTermQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class SpanWithin(Query): + """ + Returns matches which are enclosed inside another span query. + + :arg big: (required) Can be any span query. Matching spans from + `little` that are enclosed within `big` are returned. + :arg little: (required) Can be any span query. Matching spans from + `little` that are enclosed within `big` are returned. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_within" -class Template(Query): - name = "template" + def __init__( + self, + *, + big: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + little: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(big=big, little=little, boost=boost, _name=_name, **kwargs) + + +class SparseVector(Query): + """ + Using input query vectors or a natural language processing model to + convert a query into a list of token-weight pairs, queries against a + sparse vector field. + + :arg field: (required) The name of the field that contains the token- + weight pairs to be searched against. This field must be a mapped + sparse_vector field. + :arg query_vector: Dictionary of precomputed sparse vectors and their + associated weights. Only one of inference_id or query_vector may + be supplied in a request. + :arg inference_id: The inference ID to use to convert the query text + into token-weight pairs. It must be the same inference ID that was + used to create the tokens from the input text. Only one of + inference_id and query_vector is allowed. If inference_id is + specified, query must also be specified. Only one of inference_id + or query_vector may be supplied in a request. + :arg query: The query text you want to use for search. If inference_id + is specified, query must also be specified. + :arg prune: Whether to perform pruning, omitting the non-significant + tokens from the query to improve query performance. If prune is + true but the pruning_config is not specified, pruning will occur + but default values will be used. Default: false + :arg pruning_config: Optional pruning configuration. If enabled, this + will omit non-significant tokens from the query in order to + improve query performance. This is only used if prune is set to + true. If prune is set to true but pruning_config is not specified, + default values will be used. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + name = "sparse_vector" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + query_vector: Union[Mapping[str, float], "DefaultType"] = DEFAULT, + inference_id: Union[str, "DefaultType"] = DEFAULT, + query: Union[str, "DefaultType"] = DEFAULT, + prune: Union[bool, "DefaultType"] = DEFAULT, + pruning_config: Union[ + "types.TokenPruningConfig", Dict[str, Any], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + query_vector=query_vector, + inference_id=inference_id, + query=query, + prune=prune, + pruning_config=pruning_config, + boost=boost, + _name=_name, + **kwargs, + ) class Term(Query): + """ + Returns documents that contain an exact term in a provided field. To + return a document, the query term must exactly match the queried + field's value, including whitespace and capitalization. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + name = "term" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.TermQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + class Terms(Query): + """ + Returns documents that contain one or more exact terms in a provided + field. To return a document, one or more terms must exactly match a + field value, including whitespace and capitalization. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + name = "terms" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + Sequence[Union[int, float, str, bool, None, Any]], + "types.TermsLookup", + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + *, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(boost=boost, _name=_name, **kwargs) + def _setattr(self, name: str, value: Any) -> None: # here we convert any iterables that are not strings to lists if hasattr(value, "__iter__") and not isinstance(value, (str, list)): @@ -558,33 +2653,145 @@ def _setattr(self, name: str, value: Any) -> None: class TermsSet(Query): + """ + Returns documents that contain a minimum number of exact terms in a + provided field. To return a document, a required number of terms must + exactly match the field values, including whitespace and + capitalization. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. 
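A usage sketch for the Term and Terms classes above; the field names and values are illustrative:

    from elasticsearch_dsl.query import Term, Terms

    Term("user.id", "kimchy")   # {"term": {"user.id": "kimchy"}}
    Terms("tags", ["python", "search"], boost=0.5)
    # roughly: {"terms": {"tags": ["python", "search"], "boost": 0.5}}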
+ """ + name = "terms_set" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.TermsSetQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + class TextExpansion(Query): + """ + Uses a natural language processing model to convert the query text + into a list of token-weight pairs which are then used in a query + against a sparse vector or rank features field. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + name = "text_expansion" + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.TextExpansionQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) -class Wildcard(Query): - name = "wildcard" +class WeightedTokens(Query): + """ + Supports returning text_expansion query results by sending in + precomputed tokens with the query. -class Script(Query): - name = "script" + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + name = "weighted_tokens" -class ScriptScore(Query): - name = "script_score" - _param_defs = {"query": {"type": "query"}} + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.WeightedTokensQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) -class Type(Query): - name = "type" +class Wildcard(Query): + """ + Returns documents that contain terms matching a wildcard pattern. + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ -class ParentId(Query): - name = "parent_id" + name = "wildcard" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.WildcardQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) class Wrapper(Query): + """ + A query that accepts any other query as base64 encoded string. + + :arg query: (required) A base64 encoded query. The binary data format + can be any of JSON, YAML, CBOR or SMILE encodings + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + name = "wrapper" + + def __init__( + self, + *, + query: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(query=query, boost=boost, _name=_name, **kwargs) + + +class Type(Query): + """ + :arg value: (required) + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + name = "type" + + def __init__( + self, + *, + value: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(value=value, boost=boost, _name=_name, **kwargs) diff --git a/elasticsearch_dsl/types.py b/elasticsearch_dsl/types.py new file mode 100644 index 00000000..e75770fb --- /dev/null +++ b/elasticsearch_dsl/types.py @@ -0,0 +1,3290 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import Any, Dict, Literal, Mapping, Sequence, Union + +from elastic_transport.client_utils import DEFAULT, DefaultType + +from elasticsearch_dsl import Query, function +from elasticsearch_dsl.document_base import InstrumentedField +from elasticsearch_dsl.utils import AttrDict + +PipeSeparatedFlags = str + + +class QueryBase(AttrDict[Any]): + """ + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class CommonTermsQuery(QueryBase): + """ + :arg query: (required) + :arg analyzer: + :arg cutoff_frequency: + :arg high_freq_operator: + :arg low_freq_operator: + :arg minimum_should_match: + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + query: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + cutoff_frequency: Union[float, DefaultType] + high_freq_operator: Union[Literal["and", "or"], DefaultType] + low_freq_operator: Union[Literal["and", "or"], DefaultType] + minimum_should_match: Union[int, str, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + query: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + cutoff_frequency: Union[float, DefaultType] = DEFAULT, + high_freq_operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, + low_freq_operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, + minimum_should_match: Union[int, str, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if cutoff_frequency is not DEFAULT: + kwargs["cutoff_frequency"] = cutoff_frequency + if high_freq_operator is not DEFAULT: + kwargs["high_freq_operator"] = high_freq_operator + if low_freq_operator is not DEFAULT: + kwargs["low_freq_operator"] = low_freq_operator + if minimum_should_match is not DEFAULT: + kwargs["minimum_should_match"] = minimum_should_match + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class CoordsGeoBounds(AttrDict[Any]): + """ + :arg top: (required) + :arg bottom: (required) + :arg left: (required) + :arg right: (required) + """ + + top: Union[float, DefaultType] + bottom: Union[float, DefaultType] + left: Union[float, DefaultType] + right: Union[float, DefaultType] + + def __init__( + self, + *, + top: Union[float, DefaultType] = DEFAULT, + bottom: Union[float, DefaultType] = DEFAULT, + left: Union[float, DefaultType] = DEFAULT, + right: Union[float, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if top is not DEFAULT: + kwargs["top"] = top + if bottom is not DEFAULT: + kwargs["bottom"] = bottom + if left is not DEFAULT: + kwargs["left"] = left + if right is not DEFAULT: + kwargs["right"] = right + super().__init__(kwargs) + + +class FunctionScoreContainer(AttrDict[Any]): + """ + :arg exp: Function that scores a document with a exponential decay, + depending on the distance of a numeric field value of the document + from an origin. + :arg gauss: Function that scores a document with a normal decay, + depending on the distance of a numeric field value of the document + from an origin. + :arg linear: Function that scores a document with a linear decay, + depending on the distance of a numeric field value of the document + from an origin. + :arg field_value_factor: Function allows you to use a field from a + document to influence the score. It’s similar to using the + script_score function, however, it avoids the overhead of + scripting. + :arg random_score: Generates scores that are uniformly distributed + from 0 up to but not including 1. In case you want scores to be + reproducible, it is possible to provide a `seed` and `field`. + :arg script_score: Enables you to wrap another query and customize the + scoring of it optionally with a computation derived from other + numeric field values in the doc using a script expression. 
+ :arg filter: + :arg weight: + """ + + exp: Union[function.DecayFunction, DefaultType] + gauss: Union[function.DecayFunction, DefaultType] + linear: Union[function.DecayFunction, DefaultType] + field_value_factor: Union[function.FieldValueFactorScore, DefaultType] + random_score: Union[function.RandomScore, DefaultType] + script_score: Union[function.ScriptScore, DefaultType] + filter: Union[Query, DefaultType] + weight: Union[float, DefaultType] + + def __init__( + self, + *, + exp: Union[function.DecayFunction, DefaultType] = DEFAULT, + gauss: Union[function.DecayFunction, DefaultType] = DEFAULT, + linear: Union[function.DecayFunction, DefaultType] = DEFAULT, + field_value_factor: Union[ + function.FieldValueFactorScore, DefaultType + ] = DEFAULT, + random_score: Union[function.RandomScore, DefaultType] = DEFAULT, + script_score: Union[function.ScriptScore, DefaultType] = DEFAULT, + filter: Union[Query, DefaultType] = DEFAULT, + weight: Union[float, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if exp is not DEFAULT: + kwargs["exp"] = exp + if gauss is not DEFAULT: + kwargs["gauss"] = gauss + if linear is not DEFAULT: + kwargs["linear"] = linear + if field_value_factor is not DEFAULT: + kwargs["field_value_factor"] = field_value_factor + if random_score is not DEFAULT: + kwargs["random_score"] = random_score + if script_score is not DEFAULT: + kwargs["script_score"] = script_score + if filter is not DEFAULT: + kwargs["filter"] = filter + if weight is not DEFAULT: + kwargs["weight"] = weight + super().__init__(kwargs) + + +class FuzzyQuery(QueryBase): + """ + :arg value: (required) Term you wish to find in the provided field. + :arg max_expansions: Maximum number of variations created. Defaults to + `50` if omitted. + :arg prefix_length: Number of beginning characters left unchanged when + creating expansions. + :arg rewrite: Number of beginning characters left unchanged when + creating expansions. Defaults to `constant_score` if omitted. + :arg transpositions: Indicates whether edits include transpositions of + two adjacent characters (for example `ab` to `ba`). Defaults to + `True` if omitted. + :arg fuzziness: Maximum edit distance allowed for matching. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
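As a sketch, a FunctionScoreContainer combining a filter with a static weight; it would typically be passed in the `functions` list of a `function_score` query, and the field name is illustrative:

    from elasticsearch_dsl import types
    from elasticsearch_dsl.query import Term

    types.FunctionScoreContainer(filter=Term("category", "books"), weight=2.0)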
+ :arg _name: + """ + + value: Union[str, float, bool, DefaultType] + max_expansions: Union[int, DefaultType] + prefix_length: Union[int, DefaultType] + rewrite: Union[str, DefaultType] + transpositions: Union[bool, DefaultType] + fuzziness: Union[str, int, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + value: Union[str, float, bool, DefaultType] = DEFAULT, + max_expansions: Union[int, DefaultType] = DEFAULT, + prefix_length: Union[int, DefaultType] = DEFAULT, + rewrite: Union[str, DefaultType] = DEFAULT, + transpositions: Union[bool, DefaultType] = DEFAULT, + fuzziness: Union[str, int, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if max_expansions is not DEFAULT: + kwargs["max_expansions"] = max_expansions + if prefix_length is not DEFAULT: + kwargs["prefix_length"] = prefix_length + if rewrite is not DEFAULT: + kwargs["rewrite"] = rewrite + if transpositions is not DEFAULT: + kwargs["transpositions"] = transpositions + if fuzziness is not DEFAULT: + kwargs["fuzziness"] = fuzziness + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class GeoHashLocation(AttrDict[Any]): + """ + :arg geohash: (required) + """ + + geohash: Union[str, DefaultType] + + def __init__(self, *, geohash: Union[str, DefaultType] = DEFAULT, **kwargs: Any): + if geohash is not DEFAULT: + kwargs["geohash"] = geohash + super().__init__(kwargs) + + +class GeoPolygonPoints(AttrDict[Any]): + """ + :arg points: (required) + """ + + points: Union[ + Sequence[Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]], + Dict[str, Any], + DefaultType, + ] + + def __init__( + self, + *, + points: Union[ + Sequence[ + Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str] + ], + Dict[str, Any], + DefaultType, + ] = DEFAULT, + **kwargs: Any, + ): + if points is not DEFAULT: + kwargs["points"] = points + super().__init__(kwargs) + + +class GeoShapeFieldQuery(AttrDict[Any]): + """ + :arg shape: + :arg indexed_shape: Query using an indexed shape retrieved from the + the specified document and path. + :arg relation: Spatial relation operator used to search a geo field. + Defaults to `intersects` if omitted. + """ + + shape: Any + indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] + relation: Union[ + Literal["intersects", "disjoint", "within", "contains"], DefaultType + ] + + def __init__( + self, + *, + shape: Any = DEFAULT, + indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] = DEFAULT, + relation: Union[ + Literal["intersects", "disjoint", "within", "contains"], DefaultType + ] = DEFAULT, + **kwargs: Any, + ): + if shape is not DEFAULT: + kwargs["shape"] = shape + if indexed_shape is not DEFAULT: + kwargs["indexed_shape"] = indexed_shape + if relation is not DEFAULT: + kwargs["relation"] = relation + super().__init__(kwargs) + + +class InnerHits(AttrDict[Any]): + """ + :arg name: The name for the particular inner hit definition in the + response. Useful when a search request contains multiple inner + hits. + :arg size: The maximum number of hits to return per `inner_hits`. + Defaults to `3` if omitted. + :arg from: Inner hit starting document offset. 
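For example, assuming the single-field `Fuzzy` query class generated earlier in query.py, a typed FuzzyQuery value can be attached to a field like this (names are illustrative):

    from elasticsearch_dsl import types
    from elasticsearch_dsl.query import Fuzzy

    Fuzzy("user.name", types.FuzzyQuery(value="kimzhy", fuzziness="AUTO"))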
+ :arg collapse: + :arg docvalue_fields: + :arg explain: + :arg highlight: + :arg ignore_unmapped: + :arg script_fields: + :arg seq_no_primary_term: + :arg fields: + :arg sort: How the inner hits should be sorted per `inner_hits`. By + default, inner hits are sorted by score. + :arg _source: + :arg stored_fields: + :arg track_scores: + :arg version: + """ + + name: Union[str, DefaultType] + size: Union[int, DefaultType] + from_: Union[int, DefaultType] + collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] + docvalue_fields: Union[Sequence["FieldAndFormat"], Dict[str, Any], DefaultType] + explain: Union[bool, DefaultType] + highlight: Union["Highlight", Dict[str, Any], DefaultType] + ignore_unmapped: Union[bool, DefaultType] + script_fields: Union[ + Mapping[Union[str, InstrumentedField], "ScriptField"], + Dict[str, Any], + DefaultType, + ] + seq_no_primary_term: Union[bool, DefaultType] + fields: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] + sort: Union[ + Union[Union[str, InstrumentedField], "SortOptions"], + Sequence[Union[Union[str, InstrumentedField], "SortOptions"]], + Dict[str, Any], + DefaultType, + ] + _source: Union[bool, "SourceFilter", Dict[str, Any], DefaultType] + stored_fields: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] + track_scores: Union[bool, DefaultType] + version: Union[bool, DefaultType] + + def __init__( + self, + *, + name: Union[str, DefaultType] = DEFAULT, + size: Union[int, DefaultType] = DEFAULT, + from_: Union[int, DefaultType] = DEFAULT, + collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT, + docvalue_fields: Union[ + Sequence["FieldAndFormat"], Dict[str, Any], DefaultType + ] = DEFAULT, + explain: Union[bool, DefaultType] = DEFAULT, + highlight: Union["Highlight", Dict[str, Any], DefaultType] = DEFAULT, + ignore_unmapped: Union[bool, DefaultType] = DEFAULT, + script_fields: Union[ + Mapping[Union[str, InstrumentedField], "ScriptField"], + Dict[str, Any], + DefaultType, + ] = DEFAULT, + seq_no_primary_term: Union[bool, DefaultType] = DEFAULT, + fields: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] = DEFAULT, + sort: Union[ + Union[Union[str, InstrumentedField], "SortOptions"], + Sequence[Union[Union[str, InstrumentedField], "SortOptions"]], + Dict[str, Any], + DefaultType, + ] = DEFAULT, + _source: Union[bool, "SourceFilter", Dict[str, Any], DefaultType] = DEFAULT, + stored_fields: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] = DEFAULT, + track_scores: Union[bool, DefaultType] = DEFAULT, + version: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if name is not DEFAULT: + kwargs["name"] = name + if size is not DEFAULT: + kwargs["size"] = size + if from_ is not DEFAULT: + kwargs["from_"] = from_ + if collapse is not DEFAULT: + kwargs["collapse"] = collapse + if docvalue_fields is not DEFAULT: + kwargs["docvalue_fields"] = docvalue_fields + if explain is not DEFAULT: + kwargs["explain"] = explain + if highlight is not DEFAULT: + kwargs["highlight"] = highlight + if ignore_unmapped is not DEFAULT: + kwargs["ignore_unmapped"] = ignore_unmapped + if script_fields is not DEFAULT: + kwargs["script_fields"] = str(script_fields) + if seq_no_primary_term is not DEFAULT: + kwargs["seq_no_primary_term"] = seq_no_primary_term + if fields is not DEFAULT: + kwargs["fields"] = str(fields) + if sort is not 
DEFAULT: + kwargs["sort"] = str(sort) + if _source is not DEFAULT: + kwargs["_source"] = _source + if stored_fields is not DEFAULT: + kwargs["stored_fields"] = str(stored_fields) + if track_scores is not DEFAULT: + kwargs["track_scores"] = track_scores + if version is not DEFAULT: + kwargs["version"] = version + super().__init__(kwargs) + + +class IntervalsQuery(QueryBase): + """ + :arg all_of: Returns matches that span a combination of other rules. + :arg any_of: Returns intervals produced by any of its sub-rules. + :arg fuzzy: Matches terms that are similar to the provided term, + within an edit distance defined by `fuzziness`. + :arg match: Matches analyzed text. + :arg prefix: Matches terms that start with a specified set of + characters. + :arg wildcard: Matches terms using a wildcard pattern. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] + any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] + fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] + match: Union["IntervalsMatch", Dict[str, Any], DefaultType] + prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] + wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] = DEFAULT, + any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] = DEFAULT, + fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT, + match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT, + prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT, + wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if all_of is not DEFAULT: + kwargs["all_of"] = all_of + if any_of is not DEFAULT: + kwargs["any_of"] = any_of + if fuzzy is not DEFAULT: + kwargs["fuzzy"] = fuzzy + if match is not DEFAULT: + kwargs["match"] = match + if prefix is not DEFAULT: + kwargs["prefix"] = prefix + if wildcard is not DEFAULT: + kwargs["wildcard"] = wildcard + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class LatLonGeoLocation(AttrDict[Any]): + """ + :arg lat: (required) Latitude + :arg lon: (required) Longitude + """ + + lat: Union[float, DefaultType] + lon: Union[float, DefaultType] + + def __init__( + self, + *, + lat: Union[float, DefaultType] = DEFAULT, + lon: Union[float, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if lat is not DEFAULT: + kwargs["lat"] = lat + if lon is not DEFAULT: + kwargs["lon"] = lon + super().__init__(kwargs) + + +class LikeDocument(AttrDict[Any]): + """ + :arg doc: A document not present in the index. + :arg fields: + :arg _id: ID of a document. + :arg _index: Index of a document. + :arg per_field_analyzer: Overrides the default analyzer. + :arg routing: + :arg version: + :arg version_type: Defaults to `'internal'` if omitted. 
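A sketch combining InnerHits with the Nested query defined earlier in query.py (the field and inner-hits name are illustrative):

    from elasticsearch_dsl import types
    from elasticsearch_dsl.query import Nested, Term

    Nested(
        path="comments",
        query=Term("comments.author", "kimchy"),
        inner_hits=types.InnerHits(name="top_comments", size=3),
    )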
+ """ + + doc: Any + fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] + _id: Union[str, DefaultType] + _index: Union[str, DefaultType] + per_field_analyzer: Union[Mapping[Union[str, InstrumentedField], str], DefaultType] + routing: Union[str, DefaultType] + version: Union[int, DefaultType] + version_type: Union[ + Literal["internal", "external", "external_gte", "force"], DefaultType + ] + + def __init__( + self, + *, + doc: Any = DEFAULT, + fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] = DEFAULT, + _id: Union[str, DefaultType] = DEFAULT, + _index: Union[str, DefaultType] = DEFAULT, + per_field_analyzer: Union[ + Mapping[Union[str, InstrumentedField], str], DefaultType + ] = DEFAULT, + routing: Union[str, DefaultType] = DEFAULT, + version: Union[int, DefaultType] = DEFAULT, + version_type: Union[ + Literal["internal", "external", "external_gte", "force"], DefaultType + ] = DEFAULT, + **kwargs: Any, + ): + if doc is not DEFAULT: + kwargs["doc"] = doc + if fields is not DEFAULT: + kwargs["fields"] = str(fields) + if _id is not DEFAULT: + kwargs["_id"] = _id + if _index is not DEFAULT: + kwargs["_index"] = _index + if per_field_analyzer is not DEFAULT: + kwargs["per_field_analyzer"] = str(per_field_analyzer) + if routing is not DEFAULT: + kwargs["routing"] = routing + if version is not DEFAULT: + kwargs["version"] = version + if version_type is not DEFAULT: + kwargs["version_type"] = version_type + super().__init__(kwargs) + + +class MatchBoolPrefixQuery(QueryBase): + """ + :arg query: (required) Terms you wish to find in the provided field. + The last term is used in a prefix query. + :arg analyzer: Analyzer used to convert the text in the query value + into tokens. + :arg fuzziness: Maximum edit distance allowed for matching. Can be + applied to the term subqueries constructed for all terms but the + final term. + :arg fuzzy_rewrite: Method used to rewrite the query. Can be applied + to the term subqueries constructed for all terms but the final + term. + :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include + transpositions of two adjacent characters (for example, `ab` to + `ba`). Can be applied to the term subqueries constructed for all + terms but the final term. Defaults to `True` if omitted. + :arg max_expansions: Maximum number of terms to which the query will + expand. Can be applied to the term subqueries constructed for all + terms but the final term. Defaults to `50` if omitted. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. Applied to the constructed bool + query. + :arg operator: Boolean logic used to interpret text in the query + value. Applied to the constructed bool query. Defaults to `'or'` + if omitted. + :arg prefix_length: Number of beginning characters left unchanged for + fuzzy matching. Can be applied to the term subqueries constructed + for all terms but the final term. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + query: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + fuzziness: Union[str, int, DefaultType] + fuzzy_rewrite: Union[str, DefaultType] + fuzzy_transpositions: Union[bool, DefaultType] + max_expansions: Union[int, DefaultType] + minimum_should_match: Union[int, str, DefaultType] + operator: Union[Literal["and", "or"], DefaultType] + prefix_length: Union[int, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + query: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + fuzziness: Union[str, int, DefaultType] = DEFAULT, + fuzzy_rewrite: Union[str, DefaultType] = DEFAULT, + fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT, + max_expansions: Union[int, DefaultType] = DEFAULT, + minimum_should_match: Union[int, str, DefaultType] = DEFAULT, + operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, + prefix_length: Union[int, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if fuzziness is not DEFAULT: + kwargs["fuzziness"] = fuzziness + if fuzzy_rewrite is not DEFAULT: + kwargs["fuzzy_rewrite"] = fuzzy_rewrite + if fuzzy_transpositions is not DEFAULT: + kwargs["fuzzy_transpositions"] = fuzzy_transpositions + if max_expansions is not DEFAULT: + kwargs["max_expansions"] = max_expansions + if minimum_should_match is not DEFAULT: + kwargs["minimum_should_match"] = minimum_should_match + if operator is not DEFAULT: + kwargs["operator"] = operator + if prefix_length is not DEFAULT: + kwargs["prefix_length"] = prefix_length + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class MatchPhrasePrefixQuery(QueryBase): + """ + :arg query: (required) Text you wish to find in the provided field. + :arg analyzer: Analyzer used to convert text in the query value into + tokens. + :arg max_expansions: Maximum number of terms to which the last + provided term of the query value will expand. Defaults to `50` if + omitted. + :arg slop: Maximum number of positions allowed between matching + tokens. + :arg zero_terms_query: Indicates whether no documents are returned if + the analyzer removes all tokens, such as when using a `stop` + filter. Defaults to `none` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + query: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + max_expansions: Union[int, DefaultType] + slop: Union[int, DefaultType] + zero_terms_query: Union[Literal["all", "none"], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + query: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + max_expansions: Union[int, DefaultType] = DEFAULT, + slop: Union[int, DefaultType] = DEFAULT, + zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if max_expansions is not DEFAULT: + kwargs["max_expansions"] = max_expansions + if slop is not DEFAULT: + kwargs["slop"] = slop + if zero_terms_query is not DEFAULT: + kwargs["zero_terms_query"] = zero_terms_query + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class MatchPhraseQuery(QueryBase): + """ + :arg query: (required) Query terms that are analyzed and turned into a + phrase query. + :arg analyzer: Analyzer used to convert the text in the query value + into tokens. + :arg slop: Maximum number of positions allowed between matching + tokens. + :arg zero_terms_query: Indicates whether no documents are returned if + the `analyzer` removes all tokens, such as when using a `stop` + filter. Defaults to `'none'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + query: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + slop: Union[int, DefaultType] + zero_terms_query: Union[Literal["all", "none"], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + query: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + slop: Union[int, DefaultType] = DEFAULT, + zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if slop is not DEFAULT: + kwargs["slop"] = slop + if zero_terms_query is not DEFAULT: + kwargs["zero_terms_query"] = zero_terms_query + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class MatchQuery(QueryBase): + """ + :arg query: (required) Text, number, boolean value or date you wish to + find in the provided field. + :arg analyzer: Analyzer used to convert the text in the query value + into tokens. + :arg auto_generate_synonyms_phrase_query: If `true`, match phrase + queries are automatically created for multi-term synonyms. + Defaults to `True` if omitted. + :arg cutoff_frequency: + :arg fuzziness: Maximum edit distance allowed for matching. + :arg fuzzy_rewrite: Method used to rewrite the query. 
+ :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include + transpositions of two adjacent characters (for example, `ab` to + `ba`). Defaults to `True` if omitted. + :arg lenient: If `true`, format-based errors, such as providing a text + query value for a numeric field, are ignored. + :arg max_expansions: Maximum number of terms to which the query will + expand. Defaults to `50` if omitted. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. + :arg operator: Boolean logic used to interpret text in the query + value. Defaults to `'or'` if omitted. + :arg prefix_length: Number of beginning characters left unchanged for + fuzzy matching. + :arg zero_terms_query: Indicates whether no documents are returned if + the `analyzer` removes all tokens, such as when using a `stop` + filter. Defaults to `'none'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + query: Union[str, float, bool, DefaultType] + analyzer: Union[str, DefaultType] + auto_generate_synonyms_phrase_query: Union[bool, DefaultType] + cutoff_frequency: Union[float, DefaultType] + fuzziness: Union[str, int, DefaultType] + fuzzy_rewrite: Union[str, DefaultType] + fuzzy_transpositions: Union[bool, DefaultType] + lenient: Union[bool, DefaultType] + max_expansions: Union[int, DefaultType] + minimum_should_match: Union[int, str, DefaultType] + operator: Union[Literal["and", "or"], DefaultType] + prefix_length: Union[int, DefaultType] + zero_terms_query: Union[Literal["all", "none"], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + query: Union[str, float, bool, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + auto_generate_synonyms_phrase_query: Union[bool, DefaultType] = DEFAULT, + cutoff_frequency: Union[float, DefaultType] = DEFAULT, + fuzziness: Union[str, int, DefaultType] = DEFAULT, + fuzzy_rewrite: Union[str, DefaultType] = DEFAULT, + fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT, + lenient: Union[bool, DefaultType] = DEFAULT, + max_expansions: Union[int, DefaultType] = DEFAULT, + minimum_should_match: Union[int, str, DefaultType] = DEFAULT, + operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, + prefix_length: Union[int, DefaultType] = DEFAULT, + zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if auto_generate_synonyms_phrase_query is not DEFAULT: + kwargs["auto_generate_synonyms_phrase_query"] = ( + auto_generate_synonyms_phrase_query + ) + if cutoff_frequency is not DEFAULT: + kwargs["cutoff_frequency"] = cutoff_frequency + if fuzziness is not DEFAULT: + kwargs["fuzziness"] = fuzziness + if fuzzy_rewrite is not DEFAULT: + kwargs["fuzzy_rewrite"] = fuzzy_rewrite + if fuzzy_transpositions is not DEFAULT: + kwargs["fuzzy_transpositions"] = fuzzy_transpositions + if lenient is not DEFAULT: + kwargs["lenient"] = lenient + if max_expansions is not DEFAULT: + kwargs["max_expansions"] = max_expansions + if 
minimum_should_match is not DEFAULT: + kwargs["minimum_should_match"] = minimum_should_match + if operator is not DEFAULT: + kwargs["operator"] = operator + if prefix_length is not DEFAULT: + kwargs["prefix_length"] = prefix_length + if zero_terms_query is not DEFAULT: + kwargs["zero_terms_query"] = zero_terms_query + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class PinnedDoc(AttrDict[Any]): + """ + :arg _id: (required) The unique document ID. + :arg _index: (required) The index that contains the document. + """ + + _id: Union[str, DefaultType] + _index: Union[str, DefaultType] + + def __init__( + self, + *, + _id: Union[str, DefaultType] = DEFAULT, + _index: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if _id is not DEFAULT: + kwargs["_id"] = _id + if _index is not DEFAULT: + kwargs["_index"] = _index + super().__init__(kwargs) + + +class PrefixQuery(QueryBase): + """ + :arg value: (required) Beginning characters of terms you wish to find + in the provided field. + :arg rewrite: Method used to rewrite the query. + :arg case_insensitive: Allows ASCII case insensitive matching of the + value with the indexed field values when set to `true`. Default is + `false` which means the case sensitivity of matching depends on + the underlying field’s mapping. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + value: Union[str, DefaultType] + rewrite: Union[str, DefaultType] + case_insensitive: Union[bool, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + value: Union[str, DefaultType] = DEFAULT, + rewrite: Union[str, DefaultType] = DEFAULT, + case_insensitive: Union[bool, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if rewrite is not DEFAULT: + kwargs["rewrite"] = rewrite + if case_insensitive is not DEFAULT: + kwargs["case_insensitive"] = case_insensitive + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class QueryVectorBuilder(AttrDict[Any]): + """ + :arg text_embedding: + """ + + text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if text_embedding is not DEFAULT: + kwargs["text_embedding"] = text_embedding + super().__init__(kwargs) + + +class RankFeatureFunction(AttrDict[Any]): + pass + + +class RankFeatureFunctionLinear(RankFeatureFunction): + pass + + +class RankFeatureFunctionLogarithm(RankFeatureFunction): + """ + :arg scaling_factor: (required) Configurable scaling factor. 
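+
+    Example (illustrative only; the actual score is computed by
+    Elasticsearch, not by this class)::
+
+        log_fn = RankFeatureFunctionLogarithm(scaling_factor=4)  # assumed factor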
+ """ + + scaling_factor: Union[float, DefaultType] + + def __init__( + self, *, scaling_factor: Union[float, DefaultType] = DEFAULT, **kwargs: Any + ): + if scaling_factor is not DEFAULT: + kwargs["scaling_factor"] = scaling_factor + super().__init__(**kwargs) + + +class RankFeatureFunctionSaturation(RankFeatureFunction): + """ + :arg pivot: Configurable pivot value so that the result will be less + than 0.5. + """ + + pivot: Union[float, DefaultType] + + def __init__(self, *, pivot: Union[float, DefaultType] = DEFAULT, **kwargs: Any): + if pivot is not DEFAULT: + kwargs["pivot"] = pivot + super().__init__(**kwargs) + + +class RankFeatureFunctionSigmoid(RankFeatureFunction): + """ + :arg pivot: (required) Configurable pivot value so that the result + will be less than 0.5. + :arg exponent: (required) Configurable Exponent. + """ + + pivot: Union[float, DefaultType] + exponent: Union[float, DefaultType] + + def __init__( + self, + *, + pivot: Union[float, DefaultType] = DEFAULT, + exponent: Union[float, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if pivot is not DEFAULT: + kwargs["pivot"] = pivot + if exponent is not DEFAULT: + kwargs["exponent"] = exponent + super().__init__(**kwargs) + + +class RegexpQuery(QueryBase): + """ + :arg value: (required) Regular expression for terms you wish to find + in the provided field. + :arg case_insensitive: Allows case insensitive matching of the regular + expression value with the indexed field values when set to `true`. + When `false`, case sensitivity of matching depends on the + underlying field’s mapping. + :arg flags: Enables optional operators for the regular expression. + :arg max_determinized_states: Maximum number of automaton states + required for the query. Defaults to `10000` if omitted. + :arg rewrite: Method used to rewrite the query. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + value: Union[str, DefaultType] + case_insensitive: Union[bool, DefaultType] + flags: Union[str, DefaultType] + max_determinized_states: Union[int, DefaultType] + rewrite: Union[str, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + value: Union[str, DefaultType] = DEFAULT, + case_insensitive: Union[bool, DefaultType] = DEFAULT, + flags: Union[str, DefaultType] = DEFAULT, + max_determinized_states: Union[int, DefaultType] = DEFAULT, + rewrite: Union[str, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if case_insensitive is not DEFAULT: + kwargs["case_insensitive"] = case_insensitive + if flags is not DEFAULT: + kwargs["flags"] = flags + if max_determinized_states is not DEFAULT: + kwargs["max_determinized_states"] = max_determinized_states + if rewrite is not DEFAULT: + kwargs["rewrite"] = rewrite + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class Script(AttrDict[Any]): + """ + :arg source: The script source. + :arg id: The `id` for a stored script. + :arg params: Specifies any named parameters that are passed into the + script as variables. 
Use parameters instead of hard-coded values
+        to decrease compile time.
+    :arg lang: Specifies the language the script is written in. Defaults
+        to `painless` if omitted.
+    :arg options:
+    """
+
+    source: Union[str, DefaultType]
+    id: Union[str, DefaultType]
+    params: Union[Mapping[str, Any], DefaultType]
+    lang: Union[Literal["painless", "expression", "mustache", "java"], DefaultType]
+    options: Union[Mapping[str, str], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        source: Union[str, DefaultType] = DEFAULT,
+        id: Union[str, DefaultType] = DEFAULT,
+        params: Union[Mapping[str, Any], DefaultType] = DEFAULT,
+        lang: Union[
+            Literal["painless", "expression", "mustache", "java"], DefaultType
+        ] = DEFAULT,
+        options: Union[Mapping[str, str], DefaultType] = DEFAULT,
+        **kwargs: Any,
+    ):
+        if source is not DEFAULT:
+            kwargs["source"] = source
+        if id is not DEFAULT:
+            kwargs["id"] = id
+        if params is not DEFAULT:
+            kwargs["params"] = params
+        if lang is not DEFAULT:
+            kwargs["lang"] = lang
+        if options is not DEFAULT:
+            kwargs["options"] = options
+        super().__init__(kwargs)
+
+
+class ShapeFieldQuery(AttrDict[Any]):
+    """
+    :arg indexed_shape: Queries using a pre-indexed shape.
+    :arg relation: Spatial relation between the query shape and the
+        document shape.
+    :arg shape: Queries using an inline shape definition in GeoJSON or
+        Well Known Text (WKT) format.
+    """
+
+    indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType]
+    relation: Union[
+        Literal["intersects", "disjoint", "within", "contains"], DefaultType
+    ]
+    shape: Any
+
+    def __init__(
+        self,
+        *,
+        indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] = DEFAULT,
+        relation: Union[
+            Literal["intersects", "disjoint", "within", "contains"], DefaultType
+        ] = DEFAULT,
+        shape: Any = DEFAULT,
+        **kwargs: Any,
+    ):
+        if indexed_shape is not DEFAULT:
+            kwargs["indexed_shape"] = indexed_shape
+        if relation is not DEFAULT:
+            kwargs["relation"] = relation
+        if shape is not DEFAULT:
+            kwargs["shape"] = shape
+        super().__init__(kwargs)
+
+
+class SpanQuery(AttrDict[Any]):
+    """
+    :arg span_containing: Accepts a list of span queries, but only returns
+        those spans which also match a second span query.
+    :arg span_field_masking: Allows queries like `span_near` or `span_or`
+        across different fields.
+    :arg span_first: Accepts another span query whose matches must appear
+        within the first N positions of the field.
+    :arg span_gap:
+    :arg span_multi: Wraps a `term`, `range`, `prefix`, `wildcard`,
+        `regexp`, or `fuzzy` query.
+    :arg span_near: Accepts multiple span queries whose matches must be
+        within the specified distance of each other, and possibly in the
+        same order.
+    :arg span_not: Wraps another span query, and excludes any documents
+        which match that query.
+    :arg span_or: Combines multiple span queries and returns documents
+        which match any of the specified queries.
+    :arg span_term: The equivalent of the `term` query but for use with
+        other span queries.
+    :arg span_within: The result from a single span query is returned as
+        long as its span falls within the spans returned by a list of
+        other span queries.
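+
+    Example (editorial sketch, not generator output; field name and term
+    are hypothetical)::
+
+        sq = SpanQuery(span_term={"title": SpanTermQuery(value="python")})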
+ """ + + span_containing: Union["SpanContainingQuery", Dict[str, Any], DefaultType] + span_field_masking: Union["SpanFieldMaskingQuery", Dict[str, Any], DefaultType] + span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType] + span_gap: Union[Mapping[Union[str, InstrumentedField], int], DefaultType] + span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType] + span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType] + span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType] + span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType] + span_term: Union[ + Mapping[Union[str, InstrumentedField], "SpanTermQuery"], + Dict[str, Any], + DefaultType, + ] + span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + span_containing: Union[ + "SpanContainingQuery", Dict[str, Any], DefaultType + ] = DEFAULT, + span_field_masking: Union[ + "SpanFieldMaskingQuery", Dict[str, Any], DefaultType + ] = DEFAULT, + span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType] = DEFAULT, + span_gap: Union[ + Mapping[Union[str, InstrumentedField], int], DefaultType + ] = DEFAULT, + span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType] = DEFAULT, + span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType] = DEFAULT, + span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType] = DEFAULT, + span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType] = DEFAULT, + span_term: Union[ + Mapping[Union[str, InstrumentedField], "SpanTermQuery"], + Dict[str, Any], + DefaultType, + ] = DEFAULT, + span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if span_containing is not DEFAULT: + kwargs["span_containing"] = span_containing + if span_field_masking is not DEFAULT: + kwargs["span_field_masking"] = span_field_masking + if span_first is not DEFAULT: + kwargs["span_first"] = span_first + if span_gap is not DEFAULT: + kwargs["span_gap"] = str(span_gap) + if span_multi is not DEFAULT: + kwargs["span_multi"] = span_multi + if span_near is not DEFAULT: + kwargs["span_near"] = span_near + if span_not is not DEFAULT: + kwargs["span_not"] = span_not + if span_or is not DEFAULT: + kwargs["span_or"] = span_or + if span_term is not DEFAULT: + kwargs["span_term"] = str(span_term) + if span_within is not DEFAULT: + kwargs["span_within"] = span_within + super().__init__(kwargs) + + +class SpanTermQuery(QueryBase): + """ + :arg value: (required) + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + value: Union[str, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + value: Union[str, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class TermQuery(QueryBase): + """ + :arg value: (required) Term you wish to find in the provided field. + :arg case_insensitive: Allows ASCII case insensitive matching of the + value with the indexed field values when set to `true`. 
When + `false`, the case sensitivity of matching depends on the + underlying field’s mapping. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + value: Union[int, float, str, bool, None, Any, DefaultType] + case_insensitive: Union[bool, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + value: Union[int, float, str, bool, None, Any, DefaultType] = DEFAULT, + case_insensitive: Union[bool, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if case_insensitive is not DEFAULT: + kwargs["case_insensitive"] = case_insensitive + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class TermsLookup(AttrDict[Any]): + """ + :arg index: (required) + :arg id: (required) + :arg path: (required) + :arg routing: + """ + + index: Union[str, DefaultType] + id: Union[str, DefaultType] + path: Union[str, InstrumentedField, DefaultType] + routing: Union[str, DefaultType] + + def __init__( + self, + *, + index: Union[str, DefaultType] = DEFAULT, + id: Union[str, DefaultType] = DEFAULT, + path: Union[str, InstrumentedField, DefaultType] = DEFAULT, + routing: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if index is not DEFAULT: + kwargs["index"] = index + if id is not DEFAULT: + kwargs["id"] = id + if path is not DEFAULT: + kwargs["path"] = str(path) + if routing is not DEFAULT: + kwargs["routing"] = routing + super().__init__(kwargs) + + +class TermsSetQuery(QueryBase): + """ + :arg terms: (required) Array of terms you wish to find in the provided + field. + :arg minimum_should_match_field: Numeric field containing the number + of matching terms required to return a document. + :arg minimum_should_match_script: Custom script containing the number + of matching terms required to return a document. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + terms: Union[Sequence[str], DefaultType] + minimum_should_match_field: Union[str, InstrumentedField, DefaultType] + minimum_should_match_script: Union["Script", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + terms: Union[Sequence[str], DefaultType] = DEFAULT, + minimum_should_match_field: Union[ + str, InstrumentedField, DefaultType + ] = DEFAULT, + minimum_should_match_script: Union[ + "Script", Dict[str, Any], DefaultType + ] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if terms is not DEFAULT: + kwargs["terms"] = terms + if minimum_should_match_field is not DEFAULT: + kwargs["minimum_should_match_field"] = str(minimum_should_match_field) + if minimum_should_match_script is not DEFAULT: + kwargs["minimum_should_match_script"] = minimum_should_match_script + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class TextExpansionQuery(QueryBase): + """ + :arg model_id: (required) The text expansion NLP model to use + :arg model_text: (required) The query text + :arg pruning_config: Token pruning configurations + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + model_id: Union[str, DefaultType] + model_text: Union[str, DefaultType] + pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + model_id: Union[str, DefaultType] = DEFAULT, + model_text: Union[str, DefaultType] = DEFAULT, + pruning_config: Union[ + "TokenPruningConfig", Dict[str, Any], DefaultType + ] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if model_id is not DEFAULT: + kwargs["model_id"] = model_id + if model_text is not DEFAULT: + kwargs["model_text"] = model_text + if pruning_config is not DEFAULT: + kwargs["pruning_config"] = pruning_config + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class TokenPruningConfig(AttrDict[Any]): + """ + :arg tokens_freq_ratio_threshold: Tokens whose frequency is more than + this threshold times the average frequency of all tokens in the + specified field are considered outliers and pruned. Defaults to + `5` if omitted. + :arg tokens_weight_threshold: Tokens whose weight is less than this + threshold are considered nonsignificant and pruned. Defaults to + `0.4` if omitted. + :arg only_score_pruned_tokens: Whether to only score pruned tokens, vs + only scoring kept tokens. 
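+
+    Example (illustrative only; the values shown are the documented server
+    defaults, spelled out explicitly)::
+
+        pruning = TokenPruningConfig(
+            tokens_freq_ratio_threshold=5, tokens_weight_threshold=0.4
+        )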
+ """ + + tokens_freq_ratio_threshold: Union[int, DefaultType] + tokens_weight_threshold: Union[float, DefaultType] + only_score_pruned_tokens: Union[bool, DefaultType] + + def __init__( + self, + *, + tokens_freq_ratio_threshold: Union[int, DefaultType] = DEFAULT, + tokens_weight_threshold: Union[float, DefaultType] = DEFAULT, + only_score_pruned_tokens: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if tokens_freq_ratio_threshold is not DEFAULT: + kwargs["tokens_freq_ratio_threshold"] = tokens_freq_ratio_threshold + if tokens_weight_threshold is not DEFAULT: + kwargs["tokens_weight_threshold"] = tokens_weight_threshold + if only_score_pruned_tokens is not DEFAULT: + kwargs["only_score_pruned_tokens"] = only_score_pruned_tokens + super().__init__(kwargs) + + +class TopLeftBottomRightGeoBounds(AttrDict[Any]): + """ + :arg top_left: (required) + :arg bottom_right: (required) + """ + + top_left: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] + bottom_right: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] + + def __init__( + self, + *, + top_left: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] = DEFAULT, + bottom_right: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] = DEFAULT, + **kwargs: Any, + ): + if top_left is not DEFAULT: + kwargs["top_left"] = top_left + if bottom_right is not DEFAULT: + kwargs["bottom_right"] = bottom_right + super().__init__(kwargs) + + +class TopRightBottomLeftGeoBounds(AttrDict[Any]): + """ + :arg top_right: (required) + :arg bottom_left: (required) + """ + + top_right: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] + bottom_left: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] + + def __init__( + self, + *, + top_right: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] = DEFAULT, + bottom_left: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] = DEFAULT, + **kwargs: Any, + ): + if top_right is not DEFAULT: + kwargs["top_right"] = top_right + if bottom_left is not DEFAULT: + kwargs["bottom_left"] = bottom_left + super().__init__(kwargs) + + +class WeightedTokensQuery(QueryBase): + """ + :arg tokens: (required) The tokens representing this query + :arg pruning_config: Token pruning configurations + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + tokens: Union[Mapping[str, float], DefaultType] + pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + tokens: Union[Mapping[str, float], DefaultType] = DEFAULT, + pruning_config: Union[ + "TokenPruningConfig", Dict[str, Any], DefaultType + ] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if tokens is not DEFAULT: + kwargs["tokens"] = tokens + if pruning_config is not DEFAULT: + kwargs["pruning_config"] = pruning_config + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class WildcardQuery(QueryBase): + """ + :arg case_insensitive: Allows case insensitive matching of the pattern + with the indexed field values when set to true. Default is false + which means the case sensitivity of matching depends on the + underlying field’s mapping. + :arg rewrite: Method used to rewrite the query. + :arg value: Wildcard pattern for terms you wish to find in the + provided field. Required, when wildcard is not set. + :arg wildcard: Wildcard pattern for terms you wish to find in the + provided field. Required, when value is not set. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + case_insensitive: Union[bool, DefaultType] + rewrite: Union[str, DefaultType] + value: Union[str, DefaultType] + wildcard: Union[str, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + case_insensitive: Union[bool, DefaultType] = DEFAULT, + rewrite: Union[str, DefaultType] = DEFAULT, + value: Union[str, DefaultType] = DEFAULT, + wildcard: Union[str, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if case_insensitive is not DEFAULT: + kwargs["case_insensitive"] = case_insensitive + if rewrite is not DEFAULT: + kwargs["rewrite"] = rewrite + if value is not DEFAULT: + kwargs["value"] = value + if wildcard is not DEFAULT: + kwargs["wildcard"] = wildcard + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class WktGeoBounds(AttrDict[Any]): + """ + :arg wkt: (required) + """ + + wkt: Union[str, DefaultType] + + def __init__(self, *, wkt: Union[str, DefaultType] = DEFAULT, **kwargs: Any): + if wkt is not DEFAULT: + kwargs["wkt"] = wkt + super().__init__(kwargs) + + +class FieldLookup(AttrDict[Any]): + """ + :arg id: (required) `id` of the document. + :arg index: Index from which to retrieve the document. + :arg path: Name of the field. + :arg routing: Custom routing value. 
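+
+    Example (editorial sketch, not generator output; index, id and path are
+    hypothetical)::
+
+        lookup = FieldLookup(index="users", id="1", path="followers")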
+ """ + + id: Union[str, DefaultType] + index: Union[str, DefaultType] + path: Union[str, InstrumentedField, DefaultType] + routing: Union[str, DefaultType] + + def __init__( + self, + *, + id: Union[str, DefaultType] = DEFAULT, + index: Union[str, DefaultType] = DEFAULT, + path: Union[str, InstrumentedField, DefaultType] = DEFAULT, + routing: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if id is not DEFAULT: + kwargs["id"] = id + if index is not DEFAULT: + kwargs["index"] = index + if path is not DEFAULT: + kwargs["path"] = str(path) + if routing is not DEFAULT: + kwargs["routing"] = routing + super().__init__(kwargs) + + +class FieldCollapse(AttrDict[Any]): + """ + :arg field: (required) The field to collapse the result set on + :arg inner_hits: The number of inner hits and their sort order + :arg max_concurrent_group_searches: The number of concurrent requests + allowed to retrieve the inner_hits per group + :arg collapse: + """ + + field: Union[str, InstrumentedField, DefaultType] + inner_hits: Union["InnerHits", Sequence["InnerHits"], Dict[str, Any], DefaultType] + max_concurrent_group_searches: Union[int, DefaultType] + collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + inner_hits: Union[ + "InnerHits", Sequence["InnerHits"], Dict[str, Any], DefaultType + ] = DEFAULT, + max_concurrent_group_searches: Union[int, DefaultType] = DEFAULT, + collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + if inner_hits is not DEFAULT: + kwargs["inner_hits"] = inner_hits + if max_concurrent_group_searches is not DEFAULT: + kwargs["max_concurrent_group_searches"] = max_concurrent_group_searches + if collapse is not DEFAULT: + kwargs["collapse"] = collapse + super().__init__(kwargs) + + +class FieldAndFormat(AttrDict[Any]): + """ + :arg field: (required) Wildcard pattern. The request returns values + for field names matching this pattern. + :arg format: Format in which the values are returned. + :arg include_unmapped: + """ + + field: Union[str, InstrumentedField, DefaultType] + format: Union[str, DefaultType] + include_unmapped: Union[bool, DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + format: Union[str, DefaultType] = DEFAULT, + include_unmapped: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + if format is not DEFAULT: + kwargs["format"] = format + if include_unmapped is not DEFAULT: + kwargs["include_unmapped"] = include_unmapped + super().__init__(kwargs) + + +class HighlightBase(AttrDict[Any]): + """ + :arg type: + :arg boundary_chars: A string that contains each boundary character. + Defaults to `.,!? \t\n` if omitted. + :arg boundary_max_scan: How far to scan for boundary characters. + Defaults to `20` if omitted. + :arg boundary_scanner: Specifies how to break the highlighted + fragments: chars, sentence, or word. Only valid for the unified + and fvh highlighters. Defaults to `sentence` for the `unified` + highlighter. Defaults to `chars` for the `fvh` highlighter. + :arg boundary_scanner_locale: Controls which locale is used to search + for sentence and word boundaries. This parameter takes a form of a + language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. + Defaults to `Locale.ROOT` if omitted. 
+    :arg force_source:
+    :arg fragmenter: Specifies how text should be broken up in highlight
+        snippets: `simple` or `span`. Only valid for the `plain`
+        highlighter. Defaults to `span` if omitted.
+    :arg fragment_size: The size of the highlighted fragment in
+        characters. Defaults to `100` if omitted.
+    :arg highlight_filter:
+    :arg highlight_query: Highlight matches for a query other than the
+        search query. This is especially useful if you use a rescore query
+        because those are not taken into account by highlighting by
+        default.
+    :arg max_fragment_length:
+    :arg max_analyzed_offset: If set to a non-negative value, highlighting
+        stops at this defined maximum limit. The rest of the text is not
+        processed, thus not highlighted and no error is returned. The
+        `max_analyzed_offset` query setting does not override the
+        `index.highlight.max_analyzed_offset` setting, which prevails when
+        it’s set to a lower value than the query setting.
+    :arg no_match_size: The amount of text you want to return from the
+        beginning of the field if there are no matching fragments to
+        highlight.
+    :arg number_of_fragments: The maximum number of fragments to return.
+        If the number of fragments is set to `0`, no fragments are
+        returned. Instead, the entire field contents are highlighted and
+        returned. This can be handy when you need to highlight short texts
+        such as a title or address, but fragmentation is not required. If
+        `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults
+        to `5` if omitted.
+    :arg options:
+    :arg order: Sorts highlighted fragments by score when set to `score`.
+        By default, fragments will be output in the order they appear in
+        the field (order: `none`). Setting this option to `score` will
+        output the most relevant fragments first. Each highlighter applies
+        its own logic to compute relevancy scores. Defaults to `none` if
+        omitted.
+    :arg phrase_limit: Controls the number of matching phrases in a
+        document that are considered. Prevents the `fvh` highlighter from
+        analyzing too many phrases and consuming too much memory. When
+        using `matched_fields`, `phrase_limit` phrases per matched field
+        are considered. Raising the limit increases query time and
+        consumes more memory. Only supported by the `fvh` highlighter.
+        Defaults to `256` if omitted.
+    :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
+        tags to use for the highlighted text. By default, highlighted text
+        is wrapped in `<em>` and `</em>` tags.
+    :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
+        tags to use for the highlighted text. By default, highlighted text
+        is wrapped in `<em>` and `</em>` tags.
+    :arg require_field_match: By default, only fields that contain a
+        query match are highlighted. Set to `false` to highlight all
+        fields. Defaults to `True` if omitted.
+    :arg tags_schema: Set to `styled` to use the built-in tag schema.
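+
+    Example (editorial sketch, not generator output; these options are
+    normally supplied through the `Highlight` class defined below, and the
+    field name is hypothetical)::
+
+        hl = Highlight(
+            fields={"title": {}}, pre_tags=["<em>"], post_tags=["</em>"]
+        )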
+ """ + + type: Union[Literal["plain", "fvh", "unified"], DefaultType] + boundary_chars: Union[str, DefaultType] + boundary_max_scan: Union[int, DefaultType] + boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType] + boundary_scanner_locale: Union[str, DefaultType] + force_source: Union[bool, DefaultType] + fragmenter: Union[Literal["simple", "span"], DefaultType] + fragment_size: Union[int, DefaultType] + highlight_filter: Union[bool, DefaultType] + highlight_query: Union[Query, DefaultType] + max_fragment_length: Union[int, DefaultType] + max_analyzed_offset: Union[int, DefaultType] + no_match_size: Union[int, DefaultType] + number_of_fragments: Union[int, DefaultType] + options: Union[Mapping[str, Any], DefaultType] + order: Union[Literal["score"], DefaultType] + phrase_limit: Union[int, DefaultType] + post_tags: Union[Sequence[str], DefaultType] + pre_tags: Union[Sequence[str], DefaultType] + require_field_match: Union[bool, DefaultType] + tags_schema: Union[Literal["styled"], DefaultType] + + def __init__( + self, + *, + type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT, + boundary_chars: Union[str, DefaultType] = DEFAULT, + boundary_max_scan: Union[int, DefaultType] = DEFAULT, + boundary_scanner: Union[ + Literal["chars", "sentence", "word"], DefaultType + ] = DEFAULT, + boundary_scanner_locale: Union[str, DefaultType] = DEFAULT, + force_source: Union[bool, DefaultType] = DEFAULT, + fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT, + fragment_size: Union[int, DefaultType] = DEFAULT, + highlight_filter: Union[bool, DefaultType] = DEFAULT, + highlight_query: Union[Query, DefaultType] = DEFAULT, + max_fragment_length: Union[int, DefaultType] = DEFAULT, + max_analyzed_offset: Union[int, DefaultType] = DEFAULT, + no_match_size: Union[int, DefaultType] = DEFAULT, + number_of_fragments: Union[int, DefaultType] = DEFAULT, + options: Union[Mapping[str, Any], DefaultType] = DEFAULT, + order: Union[Literal["score"], DefaultType] = DEFAULT, + phrase_limit: Union[int, DefaultType] = DEFAULT, + post_tags: Union[Sequence[str], DefaultType] = DEFAULT, + pre_tags: Union[Sequence[str], DefaultType] = DEFAULT, + require_field_match: Union[bool, DefaultType] = DEFAULT, + tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if type is not DEFAULT: + kwargs["type"] = type + if boundary_chars is not DEFAULT: + kwargs["boundary_chars"] = boundary_chars + if boundary_max_scan is not DEFAULT: + kwargs["boundary_max_scan"] = boundary_max_scan + if boundary_scanner is not DEFAULT: + kwargs["boundary_scanner"] = boundary_scanner + if boundary_scanner_locale is not DEFAULT: + kwargs["boundary_scanner_locale"] = boundary_scanner_locale + if force_source is not DEFAULT: + kwargs["force_source"] = force_source + if fragmenter is not DEFAULT: + kwargs["fragmenter"] = fragmenter + if fragment_size is not DEFAULT: + kwargs["fragment_size"] = fragment_size + if highlight_filter is not DEFAULT: + kwargs["highlight_filter"] = highlight_filter + if highlight_query is not DEFAULT: + kwargs["highlight_query"] = highlight_query + if max_fragment_length is not DEFAULT: + kwargs["max_fragment_length"] = max_fragment_length + if max_analyzed_offset is not DEFAULT: + kwargs["max_analyzed_offset"] = max_analyzed_offset + if no_match_size is not DEFAULT: + kwargs["no_match_size"] = no_match_size + if number_of_fragments is not DEFAULT: + kwargs["number_of_fragments"] = number_of_fragments + if options is not DEFAULT: + kwargs["options"] = 
options + if order is not DEFAULT: + kwargs["order"] = order + if phrase_limit is not DEFAULT: + kwargs["phrase_limit"] = phrase_limit + if post_tags is not DEFAULT: + kwargs["post_tags"] = post_tags + if pre_tags is not DEFAULT: + kwargs["pre_tags"] = pre_tags + if require_field_match is not DEFAULT: + kwargs["require_field_match"] = require_field_match + if tags_schema is not DEFAULT: + kwargs["tags_schema"] = tags_schema + super().__init__(kwargs) + + +class Highlight(HighlightBase): + """ + :arg fields: (required) + :arg encoder: + :arg type: + :arg boundary_chars: A string that contains each boundary character. + Defaults to `.,!? \t\n` if omitted. + :arg boundary_max_scan: How far to scan for boundary characters. + Defaults to `20` if omitted. + :arg boundary_scanner: Specifies how to break the highlighted + fragments: chars, sentence, or word. Only valid for the unified + and fvh highlighters. Defaults to `sentence` for the `unified` + highlighter. Defaults to `chars` for the `fvh` highlighter. + :arg boundary_scanner_locale: Controls which locale is used to search + for sentence and word boundaries. This parameter takes a form of a + language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. + Defaults to `Locale.ROOT` if omitted. + :arg force_source: + :arg fragmenter: Specifies how text should be broken up in highlight + snippets: `simple` or `span`. Only valid for the `plain` + highlighter. Defaults to `span` if omitted. + :arg fragment_size: The size of the highlighted fragment in + characters. Defaults to `100` if omitted. + :arg highlight_filter: + :arg highlight_query: Highlight matches for a query other than the + search query. This is especially useful if you use a rescore query + because those are not taken into account by highlighting by + default. + :arg max_fragment_length: + :arg max_analyzed_offset: If set to a non-negative value, highlighting + stops at this defined maximum limit. The rest of the text is not + processed, thus not highlighted and no error is returned The + `max_analyzed_offset` query setting does not override the + `index.highlight.max_analyzed_offset` setting, which prevails when + it’s set to lower value than the query setting. + :arg no_match_size: The amount of text you want to return from the + beginning of the field if there are no matching fragments to + highlight. + :arg number_of_fragments: The maximum number of fragments to return. + If the number of fragments is set to `0`, no fragments are + returned. Instead, the entire field contents are highlighted and + returned. This can be handy when you need to highlight short texts + such as a title or address, but fragmentation is not required. If + `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults + to `5` if omitted. + :arg options: + :arg order: Sorts highlighted fragments by score when set to `score`. + By default, fragments will be output in the order they appear in + the field (order: `none`). Setting this option to `score` will + output the most relevant fragments first. Each highlighter applies + its own logic to compute relevancy scores. Defaults to `none` if + omitted. + :arg phrase_limit: Controls the number of matching phrases in a + document that are considered. Prevents the `fvh` highlighter from + analyzing too many phrases and consuming too much memory. When + using `matched_fields`, `phrase_limit` phrases per matched field + are considered. Raising the limit increases query time and + consumes more memory. Only supported by the `fvh` highlighter. 
+ Defaults to `256` if omitted. + :arg post_tags: Use in conjunction with `pre_tags` to define the HTML + tags to use for the highlighted text. By default, highlighted text + is wrapped in `` and `` tags. + :arg pre_tags: Use in conjunction with `post_tags` to define the HTML + tags to use for the highlighted text. By default, highlighted text + is wrapped in `` and `` tags. + :arg require_field_match: By default, only fields that contains a + query match are highlighted. Set to `false` to highlight all + fields. Defaults to `True` if omitted. + :arg tags_schema: Set to `styled` to use the built-in tag schema. + """ + + fields: Union[ + Mapping[Union[str, InstrumentedField], "HighlightField"], + Dict[str, Any], + DefaultType, + ] + encoder: Union[Literal["default", "html"], DefaultType] + type: Union[Literal["plain", "fvh", "unified"], DefaultType] + boundary_chars: Union[str, DefaultType] + boundary_max_scan: Union[int, DefaultType] + boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType] + boundary_scanner_locale: Union[str, DefaultType] + force_source: Union[bool, DefaultType] + fragmenter: Union[Literal["simple", "span"], DefaultType] + fragment_size: Union[int, DefaultType] + highlight_filter: Union[bool, DefaultType] + highlight_query: Union[Query, DefaultType] + max_fragment_length: Union[int, DefaultType] + max_analyzed_offset: Union[int, DefaultType] + no_match_size: Union[int, DefaultType] + number_of_fragments: Union[int, DefaultType] + options: Union[Mapping[str, Any], DefaultType] + order: Union[Literal["score"], DefaultType] + phrase_limit: Union[int, DefaultType] + post_tags: Union[Sequence[str], DefaultType] + pre_tags: Union[Sequence[str], DefaultType] + require_field_match: Union[bool, DefaultType] + tags_schema: Union[Literal["styled"], DefaultType] + + def __init__( + self, + *, + fields: Union[ + Mapping[Union[str, InstrumentedField], "HighlightField"], + Dict[str, Any], + DefaultType, + ] = DEFAULT, + encoder: Union[Literal["default", "html"], DefaultType] = DEFAULT, + type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT, + boundary_chars: Union[str, DefaultType] = DEFAULT, + boundary_max_scan: Union[int, DefaultType] = DEFAULT, + boundary_scanner: Union[ + Literal["chars", "sentence", "word"], DefaultType + ] = DEFAULT, + boundary_scanner_locale: Union[str, DefaultType] = DEFAULT, + force_source: Union[bool, DefaultType] = DEFAULT, + fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT, + fragment_size: Union[int, DefaultType] = DEFAULT, + highlight_filter: Union[bool, DefaultType] = DEFAULT, + highlight_query: Union[Query, DefaultType] = DEFAULT, + max_fragment_length: Union[int, DefaultType] = DEFAULT, + max_analyzed_offset: Union[int, DefaultType] = DEFAULT, + no_match_size: Union[int, DefaultType] = DEFAULT, + number_of_fragments: Union[int, DefaultType] = DEFAULT, + options: Union[Mapping[str, Any], DefaultType] = DEFAULT, + order: Union[Literal["score"], DefaultType] = DEFAULT, + phrase_limit: Union[int, DefaultType] = DEFAULT, + post_tags: Union[Sequence[str], DefaultType] = DEFAULT, + pre_tags: Union[Sequence[str], DefaultType] = DEFAULT, + require_field_match: Union[bool, DefaultType] = DEFAULT, + tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if fields is not DEFAULT: + kwargs["fields"] = str(fields) + if encoder is not DEFAULT: + kwargs["encoder"] = encoder + if type is not DEFAULT: + kwargs["type"] = type + if boundary_chars is not DEFAULT: + 
kwargs["boundary_chars"] = boundary_chars + if boundary_max_scan is not DEFAULT: + kwargs["boundary_max_scan"] = boundary_max_scan + if boundary_scanner is not DEFAULT: + kwargs["boundary_scanner"] = boundary_scanner + if boundary_scanner_locale is not DEFAULT: + kwargs["boundary_scanner_locale"] = boundary_scanner_locale + if force_source is not DEFAULT: + kwargs["force_source"] = force_source + if fragmenter is not DEFAULT: + kwargs["fragmenter"] = fragmenter + if fragment_size is not DEFAULT: + kwargs["fragment_size"] = fragment_size + if highlight_filter is not DEFAULT: + kwargs["highlight_filter"] = highlight_filter + if highlight_query is not DEFAULT: + kwargs["highlight_query"] = highlight_query + if max_fragment_length is not DEFAULT: + kwargs["max_fragment_length"] = max_fragment_length + if max_analyzed_offset is not DEFAULT: + kwargs["max_analyzed_offset"] = max_analyzed_offset + if no_match_size is not DEFAULT: + kwargs["no_match_size"] = no_match_size + if number_of_fragments is not DEFAULT: + kwargs["number_of_fragments"] = number_of_fragments + if options is not DEFAULT: + kwargs["options"] = options + if order is not DEFAULT: + kwargs["order"] = order + if phrase_limit is not DEFAULT: + kwargs["phrase_limit"] = phrase_limit + if post_tags is not DEFAULT: + kwargs["post_tags"] = post_tags + if pre_tags is not DEFAULT: + kwargs["pre_tags"] = pre_tags + if require_field_match is not DEFAULT: + kwargs["require_field_match"] = require_field_match + if tags_schema is not DEFAULT: + kwargs["tags_schema"] = tags_schema + super().__init__(**kwargs) + + +class ScriptField(AttrDict[Any]): + """ + :arg script: (required) + :arg ignore_failure: + """ + + script: Union["Script", Dict[str, Any], DefaultType] + ignore_failure: Union[bool, DefaultType] + + def __init__( + self, + *, + script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, + ignore_failure: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if script is not DEFAULT: + kwargs["script"] = script + if ignore_failure is not DEFAULT: + kwargs["ignore_failure"] = ignore_failure + super().__init__(kwargs) + + +class SortOptions(AttrDict[Any]): + """ + :arg _score: + :arg _doc: + :arg _geo_distance: + :arg _script: + """ + + _score: Union["ScoreSort", Dict[str, Any], DefaultType] + _doc: Union["ScoreSort", Dict[str, Any], DefaultType] + _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] + _script: Union["ScriptSort", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + _score: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT, + _doc: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT, + _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] = DEFAULT, + _script: Union["ScriptSort", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if _score is not DEFAULT: + kwargs["_score"] = _score + if _doc is not DEFAULT: + kwargs["_doc"] = _doc + if _geo_distance is not DEFAULT: + kwargs["_geo_distance"] = _geo_distance + if _script is not DEFAULT: + kwargs["_script"] = _script + super().__init__(kwargs) + + +class SourceFilter(AttrDict[Any]): + """ + :arg excludes: + :arg includes: + """ + + excludes: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] + includes: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] + + def __init__( + self, + *, + excludes: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] 
= DEFAULT, + includes: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] = DEFAULT, + **kwargs: Any, + ): + if excludes is not DEFAULT: + kwargs["excludes"] = str(excludes) + if includes is not DEFAULT: + kwargs["includes"] = str(includes) + super().__init__(kwargs) + + +class IntervalsAllOf(AttrDict[Any]): + """ + :arg intervals: (required) An array of rules to combine. All rules + must produce a match in a document for the overall source to + match. + :arg max_gaps: Maximum number of positions between the matching terms. + Intervals produced by the rules further apart than this are not + considered matches. Defaults to `-1` if omitted. + :arg ordered: If `true`, intervals produced by the rules should appear + in the order in which they are specified. + :arg filter: Rule used to filter returned intervals. + """ + + intervals: Union[Sequence["IntervalsContainer"], Dict[str, Any], DefaultType] + max_gaps: Union[int, DefaultType] + ordered: Union[bool, DefaultType] + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + intervals: Union[ + Sequence["IntervalsContainer"], Dict[str, Any], DefaultType + ] = DEFAULT, + max_gaps: Union[int, DefaultType] = DEFAULT, + ordered: Union[bool, DefaultType] = DEFAULT, + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if intervals is not DEFAULT: + kwargs["intervals"] = intervals + if max_gaps is not DEFAULT: + kwargs["max_gaps"] = max_gaps + if ordered is not DEFAULT: + kwargs["ordered"] = ordered + if filter is not DEFAULT: + kwargs["filter"] = filter + super().__init__(kwargs) + + +class IntervalsAnyOf(AttrDict[Any]): + """ + :arg intervals: (required) An array of rules to match. + :arg filter: Rule used to filter returned intervals. + """ + + intervals: Union[Sequence["IntervalsContainer"], Dict[str, Any], DefaultType] + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + intervals: Union[ + Sequence["IntervalsContainer"], Dict[str, Any], DefaultType + ] = DEFAULT, + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if intervals is not DEFAULT: + kwargs["intervals"] = intervals + if filter is not DEFAULT: + kwargs["filter"] = filter + super().__init__(kwargs) + + +class IntervalsFuzzy(AttrDict[Any]): + """ + :arg term: (required) The term to match. + :arg analyzer: Analyzer used to normalize the term. + :arg fuzziness: Maximum edit distance allowed for matching. Defaults + to `auto` if omitted. + :arg prefix_length: Number of beginning characters left unchanged when + creating expansions. + :arg transpositions: Indicates whether edits include transpositions of + two adjacent characters (for example, `ab` to `ba`). Defaults to + `True` if omitted. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `term` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. 
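+
+    Example (illustrative only; typically nested inside an
+    `IntervalsQuery`, and the term is hypothetical)::
+
+        fuzzy_rule = IntervalsFuzzy(term="search", fuzziness=1)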
+ """ + + term: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + fuzziness: Union[str, int, DefaultType] + prefix_length: Union[int, DefaultType] + transpositions: Union[bool, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + term: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + fuzziness: Union[str, int, DefaultType] = DEFAULT, + prefix_length: Union[int, DefaultType] = DEFAULT, + transpositions: Union[bool, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if term is not DEFAULT: + kwargs["term"] = term + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if fuzziness is not DEFAULT: + kwargs["fuzziness"] = fuzziness + if prefix_length is not DEFAULT: + kwargs["prefix_length"] = prefix_length + if transpositions is not DEFAULT: + kwargs["transpositions"] = transpositions + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + super().__init__(kwargs) + + +class IntervalsMatch(AttrDict[Any]): + """ + :arg query: (required) Text you wish to find in the provided field. + :arg analyzer: Analyzer used to analyze terms in the query. + :arg max_gaps: Maximum number of positions between the matching terms. + Terms further apart than this are not considered matches. Defaults + to `-1` if omitted. + :arg ordered: If `true`, matching terms must appear in their specified + order. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `term` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. + :arg filter: An optional interval filter. + """ + + query: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + max_gaps: Union[int, DefaultType] + ordered: Union[bool, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + query: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + max_gaps: Union[int, DefaultType] = DEFAULT, + ordered: Union[bool, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if max_gaps is not DEFAULT: + kwargs["max_gaps"] = max_gaps + if ordered is not DEFAULT: + kwargs["ordered"] = ordered + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + if filter is not DEFAULT: + kwargs["filter"] = filter + super().__init__(kwargs) + + +class IntervalsPrefix(AttrDict[Any]): + """ + :arg prefix: (required) Beginning characters of terms you wish to find + in the top-level field. + :arg analyzer: Analyzer used to analyze the `prefix`. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `prefix` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. 
+ """ + + prefix: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + prefix: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if prefix is not DEFAULT: + kwargs["prefix"] = prefix + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + super().__init__(kwargs) + + +class IntervalsWildcard(AttrDict[Any]): + """ + :arg pattern: (required) Wildcard pattern used to find matching terms. + :arg analyzer: Analyzer used to analyze the `pattern`. Defaults to the + top-level field's analyzer. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `pattern` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. + """ + + pattern: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + pattern: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if pattern is not DEFAULT: + kwargs["pattern"] = pattern + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + super().__init__(kwargs) + + +class TextEmbedding(AttrDict[Any]): + """ + :arg model_id: (required) + :arg model_text: (required) + """ + + model_id: Union[str, DefaultType] + model_text: Union[str, DefaultType] + + def __init__( + self, + *, + model_id: Union[str, DefaultType] = DEFAULT, + model_text: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if model_id is not DEFAULT: + kwargs["model_id"] = model_id + if model_text is not DEFAULT: + kwargs["model_text"] = model_text + super().__init__(kwargs) + + +class SpanContainingQuery(QueryBase): + """ + :arg big: (required) Can be any span query. Matching spans from `big` + that contain matches from `little` are returned. + :arg little: (required) Can be any span query. Matching spans from + `big` that contain matches from `little` are returned. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + big: Union["SpanQuery", Dict[str, Any], DefaultType] + little: Union["SpanQuery", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if big is not DEFAULT: + kwargs["big"] = big + if little is not DEFAULT: + kwargs["little"] = little + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class SpanFieldMaskingQuery(QueryBase): + """ + :arg field: (required) + :arg query: (required) + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + field: Union[str, InstrumentedField, DefaultType] + query: Union["SpanQuery", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + query: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + if query is not DEFAULT: + kwargs["query"] = query + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class SpanFirstQuery(QueryBase): + """ + :arg end: (required) Controls the maximum end position permitted in a + match. + :arg match: (required) Can be any other span type query. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + end: Union[int, DefaultType] + match: Union["SpanQuery", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + end: Union[int, DefaultType] = DEFAULT, + match: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if end is not DEFAULT: + kwargs["end"] = end + if match is not DEFAULT: + kwargs["match"] = match + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class SpanMultiTermQuery(QueryBase): + """ + :arg match: (required) Should be a multi term query (one of + `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + match: Union[Query, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + match: Union[Query, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if match is not DEFAULT: + kwargs["match"] = match + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class SpanNearQuery(QueryBase): + """ + :arg clauses: (required) Array of one or more other span type queries. + :arg in_order: Controls whether matches are required to be in-order. + :arg slop: Controls the maximum number of intervening unmatched + positions permitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + clauses: Union[Sequence["SpanQuery"], Dict[str, Any], DefaultType] + in_order: Union[bool, DefaultType] + slop: Union[int, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + clauses: Union[Sequence["SpanQuery"], Dict[str, Any], DefaultType] = DEFAULT, + in_order: Union[bool, DefaultType] = DEFAULT, + slop: Union[int, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if clauses is not DEFAULT: + kwargs["clauses"] = clauses + if in_order is not DEFAULT: + kwargs["in_order"] = in_order + if slop is not DEFAULT: + kwargs["slop"] = slop + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class SpanNotQuery(QueryBase): + """ + :arg exclude: (required) Span query whose matches must not overlap + those returned. + :arg include: (required) Span query whose matches are filtered. + :arg dist: The number of tokens from within the include span that + can’t have overlap with the exclude span. Equivalent to setting + both `pre` and `post`. + :arg post: The number of tokens after the include span that can’t have + overlap with the exclude span. + :arg pre: The number of tokens before the include span that can’t have + overlap with the exclude span. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + exclude: Union["SpanQuery", Dict[str, Any], DefaultType] + include: Union["SpanQuery", Dict[str, Any], DefaultType] + dist: Union[int, DefaultType] + post: Union[int, DefaultType] + pre: Union[int, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + exclude: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + include: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + dist: Union[int, DefaultType] = DEFAULT, + post: Union[int, DefaultType] = DEFAULT, + pre: Union[int, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if exclude is not DEFAULT: + kwargs["exclude"] = exclude + if include is not DEFAULT: + kwargs["include"] = include + if dist is not DEFAULT: + kwargs["dist"] = dist + if post is not DEFAULT: + kwargs["post"] = post + if pre is not DEFAULT: + kwargs["pre"] = pre + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class SpanOrQuery(QueryBase): + """ + :arg clauses: (required) Array of one or more other span type queries. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + clauses: Union[Sequence["SpanQuery"], Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + clauses: Union[Sequence["SpanQuery"], Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if clauses is not DEFAULT: + kwargs["clauses"] = clauses + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class SpanWithinQuery(QueryBase): + """ + :arg big: (required) Can be any span query. Matching spans from + `little` that are enclosed within `big` are returned. + :arg little: (required) Can be any span query. Matching spans from + `little` that are enclosed within `big` are returned. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + big: Union["SpanQuery", Dict[str, Any], DefaultType] + little: Union["SpanQuery", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if big is not DEFAULT: + kwargs["big"] = big + if little is not DEFAULT: + kwargs["little"] = little + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(**kwargs) + + +class HighlightField(HighlightBase): + """ + :arg fragment_offset: + :arg matched_fields: + :arg analyzer: + :arg type: + :arg boundary_chars: A string that contains each boundary character. + Defaults to `.,!? \t\n` if omitted. + :arg boundary_max_scan: How far to scan for boundary characters. + Defaults to `20` if omitted. + :arg boundary_scanner: Specifies how to break the highlighted + fragments: chars, sentence, or word. Only valid for the unified + and fvh highlighters. Defaults to `sentence` for the `unified` + highlighter. Defaults to `chars` for the `fvh` highlighter. + :arg boundary_scanner_locale: Controls which locale is used to search + for sentence and word boundaries. This parameter takes a form of a + language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. + Defaults to `Locale.ROOT` if omitted. + :arg force_source: + :arg fragmenter: Specifies how text should be broken up in highlight + snippets: `simple` or `span`. Only valid for the `plain` + highlighter. Defaults to `span` if omitted. + :arg fragment_size: The size of the highlighted fragment in + characters. Defaults to `100` if omitted. + :arg highlight_filter: + :arg highlight_query: Highlight matches for a query other than the + search query. This is especially useful if you use a rescore query + because those are not taken into account by highlighting by + default. + :arg max_fragment_length: + :arg max_analyzed_offset: If set to a non-negative value, highlighting + stops at this defined maximum limit. The rest of the text is not + processed, thus not highlighted and no error is returned The + `max_analyzed_offset` query setting does not override the + `index.highlight.max_analyzed_offset` setting, which prevails when + it’s set to lower value than the query setting. + :arg no_match_size: The amount of text you want to return from the + beginning of the field if there are no matching fragments to + highlight. + :arg number_of_fragments: The maximum number of fragments to return. + If the number of fragments is set to `0`, no fragments are + returned. Instead, the entire field contents are highlighted and + returned. This can be handy when you need to highlight short texts + such as a title or address, but fragmentation is not required. If + `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults + to `5` if omitted. + :arg options: + :arg order: Sorts highlighted fragments by score when set to `score`. + By default, fragments will be output in the order they appear in + the field (order: `none`). Setting this option to `score` will + output the most relevant fragments first. Each highlighter applies + its own logic to compute relevancy scores. Defaults to `none` if + omitted. + :arg phrase_limit: Controls the number of matching phrases in a + document that are considered. 
Prevents the `fvh` highlighter from
+        analyzing too many phrases and consuming too much memory. When
+        using `matched_fields`, `phrase_limit` phrases per matched field
+        are considered. Raising the limit increases query time and
+        consumes more memory. Only supported by the `fvh` highlighter.
+        Defaults to `256` if omitted.
+    :arg post_tags: Use in conjunction with `pre_tags` to define the HTML
+        tags to use for the highlighted text. By default, highlighted text
+        is wrapped in `<em>` and `</em>` tags.
+    :arg pre_tags: Use in conjunction with `post_tags` to define the HTML
+        tags to use for the highlighted text. By default, highlighted text
+        is wrapped in `<em>` and `</em>` tags.
+    :arg require_field_match: By default, only fields that contain a
+        query match are highlighted. Set to `false` to highlight all
+        fields. Defaults to `True` if omitted.
+    :arg tags_schema: Set to `styled` to use the built-in tag schema.
+    """
+
+    fragment_offset: Union[int, DefaultType]
+    matched_fields: Union[
+        Union[str, InstrumentedField],
+        Sequence[Union[str, InstrumentedField]],
+        DefaultType,
+    ]
+    analyzer: Union[str, Dict[str, Any], DefaultType]
+    type: Union[Literal["plain", "fvh", "unified"], DefaultType]
+    boundary_chars: Union[str, DefaultType]
+    boundary_max_scan: Union[int, DefaultType]
+    boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType]
+    boundary_scanner_locale: Union[str, DefaultType]
+    force_source: Union[bool, DefaultType]
+    fragmenter: Union[Literal["simple", "span"], DefaultType]
+    fragment_size: Union[int, DefaultType]
+    highlight_filter: Union[bool, DefaultType]
+    highlight_query: Union[Query, DefaultType]
+    max_fragment_length: Union[int, DefaultType]
+    max_analyzed_offset: Union[int, DefaultType]
+    no_match_size: Union[int, DefaultType]
+    number_of_fragments: Union[int, DefaultType]
+    options: Union[Mapping[str, Any], DefaultType]
+    order: Union[Literal["score"], DefaultType]
+    phrase_limit: Union[int, DefaultType]
+    post_tags: Union[Sequence[str], DefaultType]
+    pre_tags: Union[Sequence[str], DefaultType]
+    require_field_match: Union[bool, DefaultType]
+    tags_schema: Union[Literal["styled"], DefaultType]
+
+    def __init__(
+        self,
+        *,
+        fragment_offset: Union[int, DefaultType] = DEFAULT,
+        matched_fields: Union[
+            Union[str, InstrumentedField],
+            Sequence[Union[str, InstrumentedField]],
+            DefaultType,
+        ] = DEFAULT,
+        analyzer: Union[str, Dict[str, Any], DefaultType] = DEFAULT,
+        type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT,
+        boundary_chars: Union[str, DefaultType] = DEFAULT,
+        boundary_max_scan: Union[int, DefaultType] = DEFAULT,
+        boundary_scanner: Union[
+            Literal["chars", "sentence", "word"], DefaultType
+        ] = DEFAULT,
+        boundary_scanner_locale: Union[str, DefaultType] = DEFAULT,
+        force_source: Union[bool, DefaultType] = DEFAULT,
+        fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT,
+        fragment_size: Union[int, DefaultType] = DEFAULT,
+        highlight_filter: Union[bool, DefaultType] = DEFAULT,
+        highlight_query: Union[Query, DefaultType] = DEFAULT,
+        max_fragment_length: Union[int, DefaultType] = DEFAULT,
+        max_analyzed_offset: Union[int, DefaultType] = DEFAULT,
+        no_match_size: Union[int, DefaultType] = DEFAULT,
+        number_of_fragments: Union[int, DefaultType] = DEFAULT,
+        options: Union[Mapping[str, Any], DefaultType] = DEFAULT,
+        order: Union[Literal["score"], DefaultType] = DEFAULT,
+        phrase_limit: Union[int, DefaultType] = DEFAULT,
+        post_tags: Union[Sequence[str], DefaultType] = DEFAULT,
+        pre_tags: Union[Sequence[str],
DefaultType] = DEFAULT, + require_field_match: Union[bool, DefaultType] = DEFAULT, + tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if fragment_offset is not DEFAULT: + kwargs["fragment_offset"] = fragment_offset + if matched_fields is not DEFAULT: + kwargs["matched_fields"] = str(matched_fields) + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if type is not DEFAULT: + kwargs["type"] = type + if boundary_chars is not DEFAULT: + kwargs["boundary_chars"] = boundary_chars + if boundary_max_scan is not DEFAULT: + kwargs["boundary_max_scan"] = boundary_max_scan + if boundary_scanner is not DEFAULT: + kwargs["boundary_scanner"] = boundary_scanner + if boundary_scanner_locale is not DEFAULT: + kwargs["boundary_scanner_locale"] = boundary_scanner_locale + if force_source is not DEFAULT: + kwargs["force_source"] = force_source + if fragmenter is not DEFAULT: + kwargs["fragmenter"] = fragmenter + if fragment_size is not DEFAULT: + kwargs["fragment_size"] = fragment_size + if highlight_filter is not DEFAULT: + kwargs["highlight_filter"] = highlight_filter + if highlight_query is not DEFAULT: + kwargs["highlight_query"] = highlight_query + if max_fragment_length is not DEFAULT: + kwargs["max_fragment_length"] = max_fragment_length + if max_analyzed_offset is not DEFAULT: + kwargs["max_analyzed_offset"] = max_analyzed_offset + if no_match_size is not DEFAULT: + kwargs["no_match_size"] = no_match_size + if number_of_fragments is not DEFAULT: + kwargs["number_of_fragments"] = number_of_fragments + if options is not DEFAULT: + kwargs["options"] = options + if order is not DEFAULT: + kwargs["order"] = order + if phrase_limit is not DEFAULT: + kwargs["phrase_limit"] = phrase_limit + if post_tags is not DEFAULT: + kwargs["post_tags"] = post_tags + if pre_tags is not DEFAULT: + kwargs["pre_tags"] = pre_tags + if require_field_match is not DEFAULT: + kwargs["require_field_match"] = require_field_match + if tags_schema is not DEFAULT: + kwargs["tags_schema"] = tags_schema + super().__init__(**kwargs) + + +class ScoreSort(AttrDict[Any]): + """ + :arg order: + """ + + order: Union[Literal["asc", "desc"], DefaultType] + + def __init__( + self, + *, + order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if order is not DEFAULT: + kwargs["order"] = order + super().__init__(kwargs) + + +class GeoDistanceSort(AttrDict[Any]): + """ + :arg mode: + :arg distance_type: + :arg ignore_unmapped: + :arg order: + :arg unit: + :arg nested: + """ + + mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType] + distance_type: Union[Literal["arc", "plane"], DefaultType] + ignore_unmapped: Union[bool, DefaultType] + order: Union[Literal["asc", "desc"], DefaultType] + unit: Union[ + Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType + ] + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + mode: Union[ + Literal["min", "max", "sum", "avg", "median"], DefaultType + ] = DEFAULT, + distance_type: Union[Literal["arc", "plane"], DefaultType] = DEFAULT, + ignore_unmapped: Union[bool, DefaultType] = DEFAULT, + order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, + unit: Union[ + Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType + ] = DEFAULT, + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if mode is not DEFAULT: + kwargs["mode"] = mode + if distance_type is not DEFAULT: + 
kwargs["distance_type"] = distance_type + if ignore_unmapped is not DEFAULT: + kwargs["ignore_unmapped"] = ignore_unmapped + if order is not DEFAULT: + kwargs["order"] = order + if unit is not DEFAULT: + kwargs["unit"] = unit + if nested is not DEFAULT: + kwargs["nested"] = nested + super().__init__(kwargs) + + +class ScriptSort(AttrDict[Any]): + """ + :arg script: (required) + :arg order: + :arg type: + :arg mode: + :arg nested: + """ + + script: Union["Script", Dict[str, Any], DefaultType] + order: Union[Literal["asc", "desc"], DefaultType] + type: Union[Literal["string", "number", "version"], DefaultType] + mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType] + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, + order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, + type: Union[Literal["string", "number", "version"], DefaultType] = DEFAULT, + mode: Union[ + Literal["min", "max", "sum", "avg", "median"], DefaultType + ] = DEFAULT, + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if script is not DEFAULT: + kwargs["script"] = script + if order is not DEFAULT: + kwargs["order"] = order + if type is not DEFAULT: + kwargs["type"] = type + if mode is not DEFAULT: + kwargs["mode"] = mode + if nested is not DEFAULT: + kwargs["nested"] = nested + super().__init__(kwargs) + + +class IntervalsContainer(AttrDict[Any]): + """ + :arg all_of: Returns matches that span a combination of other rules. + :arg any_of: Returns intervals produced by any of its sub-rules. + :arg fuzzy: Matches analyzed text. + :arg match: Matches analyzed text. + :arg prefix: Matches terms that start with a specified set of + characters. + :arg wildcard: Matches terms using a wildcard pattern. + """ + + all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] + any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] + fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] + match: Union["IntervalsMatch", Dict[str, Any], DefaultType] + prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] + wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] = DEFAULT, + any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] = DEFAULT, + fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT, + match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT, + prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT, + wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if all_of is not DEFAULT: + kwargs["all_of"] = all_of + if any_of is not DEFAULT: + kwargs["any_of"] = any_of + if fuzzy is not DEFAULT: + kwargs["fuzzy"] = fuzzy + if match is not DEFAULT: + kwargs["match"] = match + if prefix is not DEFAULT: + kwargs["prefix"] = prefix + if wildcard is not DEFAULT: + kwargs["wildcard"] = wildcard + super().__init__(kwargs) + + +class IntervalsFilter(AttrDict[Any]): + """ + :arg after: Query used to return intervals that follow an interval + from the `filter` rule. + :arg before: Query used to return intervals that occur before an + interval from the `filter` rule. + :arg contained_by: Query used to return intervals contained by an + interval from the `filter` rule. 
+ :arg containing: Query used to return intervals that contain an + interval from the `filter` rule. + :arg not_contained_by: Query used to return intervals that are **not** + contained by an interval from the `filter` rule. + :arg not_containing: Query used to return intervals that do **not** + contain an interval from the `filter` rule. + :arg not_overlapping: Query used to return intervals that do **not** + overlap with an interval from the `filter` rule. + :arg overlapping: Query used to return intervals that overlap with an + interval from the `filter` rule. + :arg script: Script used to return matching documents. This script + must return a boolean value: `true` or `false`. + """ + + after: Union["IntervalsContainer", Dict[str, Any], DefaultType] + before: Union["IntervalsContainer", Dict[str, Any], DefaultType] + contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType] + containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] + not_contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType] + not_containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] + not_overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] + overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] + script: Union["Script", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + after: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, + before: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, + contained_by: Union[ + "IntervalsContainer", Dict[str, Any], DefaultType + ] = DEFAULT, + containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, + not_contained_by: Union[ + "IntervalsContainer", Dict[str, Any], DefaultType + ] = DEFAULT, + not_containing: Union[ + "IntervalsContainer", Dict[str, Any], DefaultType + ] = DEFAULT, + not_overlapping: Union[ + "IntervalsContainer", Dict[str, Any], DefaultType + ] = DEFAULT, + overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, + script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if after is not DEFAULT: + kwargs["after"] = after + if before is not DEFAULT: + kwargs["before"] = before + if contained_by is not DEFAULT: + kwargs["contained_by"] = contained_by + if containing is not DEFAULT: + kwargs["containing"] = containing + if not_contained_by is not DEFAULT: + kwargs["not_contained_by"] = not_contained_by + if not_containing is not DEFAULT: + kwargs["not_containing"] = not_containing + if not_overlapping is not DEFAULT: + kwargs["not_overlapping"] = not_overlapping + if overlapping is not DEFAULT: + kwargs["overlapping"] = overlapping + if script is not DEFAULT: + kwargs["script"] = script + super().__init__(kwargs) + + +class NestedSortValue(AttrDict[Any]): + """ + :arg path: (required) + :arg filter: + :arg max_children: + :arg nested: + """ + + path: Union[str, InstrumentedField, DefaultType] + filter: Union[Query, DefaultType] + max_children: Union[int, DefaultType] + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + path: Union[str, InstrumentedField, DefaultType] = DEFAULT, + filter: Union[Query, DefaultType] = DEFAULT, + max_children: Union[int, DefaultType] = DEFAULT, + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if path is not DEFAULT: + kwargs["path"] = str(path) + if filter is not DEFAULT: + kwargs["filter"] = filter + if max_children is not DEFAULT: + 
kwargs["max_children"] = max_children + if nested is not DEFAULT: + kwargs["nested"] = nested + super().__init__(kwargs) diff --git a/elasticsearch_dsl/utils.py b/elasticsearch_dsl/utils.py index d9727f1e..3617589c 100644 --- a/elasticsearch_dsl/utils.py +++ b/elasticsearch_dsl/utils.py @@ -35,6 +35,7 @@ cast, ) +from elastic_transport.client_utils import DEFAULT from typing_extensions import Self, TypeAlias, TypeVar from .exceptions import UnknownDslObject, ValidationException @@ -162,6 +163,7 @@ class AttrDict(Generic[_ValT]): """ _d_: Dict[str, _ValT] + RESERVED: Dict[str, str] = {"from_": "from"} def __init__(self, d: Dict[str, _ValT]): # assign the inner dict manually to prevent __setattr__ from firing @@ -210,20 +212,20 @@ def __getattr__(self, attr_name: str) -> Any: def __delattr__(self, attr_name: str) -> None: try: - del self._d_[attr_name] + del self._d_[self.RESERVED.get(attr_name, attr_name)] except KeyError: raise AttributeError( f"{self.__class__.__name__!r} object has no attribute {attr_name!r}" ) def __getitem__(self, key: str) -> Any: - return _wrap(self._d_[key]) + return _wrap(self._d_[self.RESERVED.get(key, key)]) def __setitem__(self, key: str, value: _ValT) -> None: - self._d_[key] = value + self._d_[self.RESERVED.get(key, key)] = value def __delitem__(self, key: str) -> None: - del self._d_[key] + del self._d_[self.RESERVED.get(key, key)] def __setattr__(self, name: str, value: _ValT) -> None: # the __orig__class__ attribute has to be treated as an exception, as @@ -231,7 +233,7 @@ def __setattr__(self, name: str, value: _ValT) -> None: if ( name in self._d_ or not hasattr(self.__class__, name) ) and name != "__orig_class__": - self._d_[name] = value + self._d_[self.RESERVED.get(name, name)] = value else: # there is an attribute on the class (could be property, ..) 
- don't add it as field super().__setattr__(name, value) @@ -328,6 +330,8 @@ def __init__(self, _expand__to_dot: Optional[bool] = None, **params: Any) -> Non _expand__to_dot = EXPAND__TO_DOT self._params: Dict[str, Any] = {} for pname, pvalue in params.items(): + if pvalue == DEFAULT: + continue # expand "__" to dots if "__" in pname and _expand__to_dot: pname = pname.replace("__", ".") diff --git a/noxfile.py b/noxfile.py index 0f941e9b..98f8263f 100644 --- a/noxfile.py +++ b/noxfile.py @@ -57,8 +57,9 @@ def test(session): @nox.session(python="3.12") def format(session): - session.install("black~=24.0", "isort", "unasync", "setuptools") + session.install("black~=24.0", "isort", "unasync", "setuptools", ".[develop]") session.run("python", "utils/run-unasync.py") + session.run("python", "utils/generator.py", env={"PYTHONPATH": "./"}) session.run("black", "--target-version=py38", *SOURCE_FILES) session.run("isort", *SOURCE_FILES) session.run("python", "utils/license-headers.py", "fix", *SOURCE_FILES) diff --git a/setup.py b/setup.py index 7f1a1f60..3b2c71e9 100644 --- a/setup.py +++ b/setup.py @@ -31,6 +31,7 @@ "python-dateutil", "typing-extensions", "elasticsearch>=8.0.0,<9.0.0", + "elastic-transport>=8.0.0,<9.0.0", ] async_requires = [ @@ -40,6 +41,7 @@ develop_requires = [ "elasticsearch[async]", "unasync", + "jinja2", "pytest", "pytest-cov", "pytest-mock", diff --git a/tests/_async/test_search.py b/tests/_async/test_search.py index 28e07a50..8a338da9 100644 --- a/tests/_async/test_search.py +++ b/tests/_async/test_search.py @@ -21,7 +21,15 @@ import pytest from pytest import raises -from elasticsearch_dsl import AsyncEmptySearch, AsyncSearch, Document, Q, query +from elasticsearch_dsl import ( + AsyncEmptySearch, + AsyncSearch, + Document, + Q, + query, + types, + wrappers, +) from elasticsearch_dsl.exceptions import IllegalOperation @@ -479,22 +487,26 @@ def test_complex_example() -> None: def test_reverse() -> None: d = { "query": { - "filtered": { - "filter": { - "bool": { - "should": [ - {"term": {"category": "meetup"}}, - {"term": {"category": "conference"}}, - ] + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } } - }, - "query": { - "bool": { - "must": [{"match": {"title": "python"}}], - "must_not": [{"match": {"title": "ruby"}}], - "minimum_should_match": 2, + ], + "must": [ + { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } } - }, + ], } }, "post_filter": {"bool": {"must": [{"terms": {"tags": ["prague", "czech"]}}]}}, @@ -526,6 +538,74 @@ def test_reverse() -> None: assert d == s.to_dict() +def test_code_generated_classes() -> None: + s = AsyncSearch() + s = ( + s.query(query.Match("title", types.MatchQuery(query="python"))) + .query(~query.Match("title", types.MatchQuery(query="ruby"))) + .query( + query.Knn( + field="title", + query_vector=[1.0, 2.0, 3.0], + num_candidates=10, + k=3, + filter=query.Range("year", wrappers.Range(gt="2004")), + ) + ) + .filter( + query.Term("category", types.TermQuery(value="meetup")) + | query.Term("category", types.TermQuery(value="conference")) + ) + .collapse("user_id") + .post_filter(query.Terms(tags=["prague", "czech"])) + .script_fields(more_attendees="doc['attendees'].value + 42") + ) + assert { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": {"value": "meetup"}}}, + {"term": {"category": {"value": "conference"}}}, + ] + 
} + } + ], + "must": [ + {"match": {"title": {"query": "python"}}}, + { + "knn": { + "field": "title", + "filter": [ + { + "range": { + "year": { + "gt": "2004", + }, + }, + }, + ], + "k": 3, + "num_candidates": 10, + "query_vector": [ + 1.0, + 2.0, + 3.0, + ], + }, + }, + ], + "must_not": [{"match": {"title": {"query": "ruby"}}}], + } + }, + "post_filter": {"terms": {"tags": ["prague", "czech"]}}, + "collapse": {"field": "user_id"}, + "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, + } == s.to_dict() + + def test_from_dict_doesnt_need_query() -> None: s = AsyncSearch.from_dict({"size": 5}) diff --git a/tests/_async/test_update_by_query.py b/tests/_async/test_update_by_query.py index 4bde5ee0..a7dba6ab 100644 --- a/tests/_async/test_update_by_query.py +++ b/tests/_async/test_update_by_query.py @@ -101,22 +101,26 @@ def test_exclude() -> None: def test_reverse() -> None: d = { "query": { - "filtered": { - "filter": { - "bool": { - "should": [ - {"term": {"category": "meetup"}}, - {"term": {"category": "conference"}}, - ] + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } } - }, - "query": { - "bool": { - "must": [{"match": {"title": "python"}}], - "must_not": [{"match": {"title": "ruby"}}], - "minimum_should_match": 2, + ], + "must": [ + { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } } - }, + ], } }, "script": { diff --git a/tests/_sync/test_search.py b/tests/_sync/test_search.py index d777508f..ee87b788 100644 --- a/tests/_sync/test_search.py +++ b/tests/_sync/test_search.py @@ -21,7 +21,7 @@ import pytest from pytest import raises -from elasticsearch_dsl import Document, EmptySearch, Q, Search, query +from elasticsearch_dsl import Document, EmptySearch, Q, Search, query, types, wrappers from elasticsearch_dsl.exceptions import IllegalOperation @@ -479,22 +479,26 @@ def test_complex_example() -> None: def test_reverse() -> None: d = { "query": { - "filtered": { - "filter": { - "bool": { - "should": [ - {"term": {"category": "meetup"}}, - {"term": {"category": "conference"}}, - ] + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } } - }, - "query": { - "bool": { - "must": [{"match": {"title": "python"}}], - "must_not": [{"match": {"title": "ruby"}}], - "minimum_should_match": 2, + ], + "must": [ + { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } } - }, + ], } }, "post_filter": {"bool": {"must": [{"terms": {"tags": ["prague", "czech"]}}]}}, @@ -526,6 +530,74 @@ def test_reverse() -> None: assert d == s.to_dict() +def test_code_generated_classes() -> None: + s = Search() + s = ( + s.query(query.Match("title", types.MatchQuery(query="python"))) + .query(~query.Match("title", types.MatchQuery(query="ruby"))) + .query( + query.Knn( + field="title", + query_vector=[1.0, 2.0, 3.0], + num_candidates=10, + k=3, + filter=query.Range("year", wrappers.Range(gt="2004")), + ) + ) + .filter( + query.Term("category", types.TermQuery(value="meetup")) + | query.Term("category", types.TermQuery(value="conference")) + ) + .collapse("user_id") + .post_filter(query.Terms(tags=["prague", "czech"])) + .script_fields(more_attendees="doc['attendees'].value + 42") + ) + assert { + "query": { + "bool": { + "filter": [ + { + "bool": { + 
"should": [ + {"term": {"category": {"value": "meetup"}}}, + {"term": {"category": {"value": "conference"}}}, + ] + } + } + ], + "must": [ + {"match": {"title": {"query": "python"}}}, + { + "knn": { + "field": "title", + "filter": [ + { + "range": { + "year": { + "gt": "2004", + }, + }, + }, + ], + "k": 3, + "num_candidates": 10, + "query_vector": [ + 1.0, + 2.0, + 3.0, + ], + }, + }, + ], + "must_not": [{"match": {"title": {"query": "ruby"}}}], + } + }, + "post_filter": {"terms": {"tags": ["prague", "czech"]}}, + "collapse": {"field": "user_id"}, + "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, + } == s.to_dict() + + def test_from_dict_doesnt_need_query() -> None: s = Search.from_dict({"size": 5}) diff --git a/tests/_sync/test_update_by_query.py b/tests/_sync/test_update_by_query.py index 68d89c50..47ccb301 100644 --- a/tests/_sync/test_update_by_query.py +++ b/tests/_sync/test_update_by_query.py @@ -101,22 +101,26 @@ def test_exclude() -> None: def test_reverse() -> None: d = { "query": { - "filtered": { - "filter": { - "bool": { - "should": [ - {"term": {"category": "meetup"}}, - {"term": {"category": "conference"}}, - ] + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } } - }, - "query": { - "bool": { - "must": [{"match": {"title": "python"}}], - "must_not": [{"match": {"title": "ruby"}}], - "minimum_should_match": 2, + ], + "must": [ + { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } } - }, + ], } }, "script": { diff --git a/tests/test_query.py b/tests/test_query.py index 64d8ba2f..6be9a80d 100644 --- a/tests/test_query.py +++ b/tests/test_query.py @@ -124,7 +124,7 @@ def test_query_clone() -> None: def test_bool_converts_its_init_args_to_queries() -> None: - q = query.Bool(must=[{"match": {"f": "value"}}]) + q = query.Bool(must=[{"match": {"f": "value"}}]) # type: ignore assert len(q.must) == 1 assert q.must[0] == query.Match(f="value") diff --git a/tests/test_utils.py b/tests/test_utils.py index fe417d2f..7081e9be 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -125,3 +125,12 @@ def test_attrlist_to_list() -> None: l = utils.AttrList[Any]([{}, {}]).to_list() assert isinstance(l, list) assert l == [{}, {}] + + +def test_attrdict_with_reserved_keyword() -> None: + d = utils.AttrDict({"from": 10, "size": 20}) + assert d.from_ == 10 + assert d.size == 20 + d = utils.AttrDict({}) + d.from_ = 10 + assert {"from": 10} == d.to_dict() diff --git a/utils/generator.py b/utils/generator.py new file mode 100644 index 00000000..c0a4f3a0 --- /dev/null +++ b/utils/generator.py @@ -0,0 +1,545 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import json +import re +import textwrap +from urllib.error import HTTPError +from urllib.request import urlopen + +from jinja2 import Environment, PackageLoader, select_autoescape + +from elasticsearch_dsl import VERSION + +jinja_env = Environment( + loader=PackageLoader("utils"), + autoescape=select_autoescape(), + trim_blocks=True, + lstrip_blocks=True, +) +query_py = jinja_env.get_template("query.py.tpl") +types_py = jinja_env.get_template("types.py.tpl") + +# map with name replacements for Elasticsearch attributes +PROP_REPLACEMENTS = {"from": "from_"} + +# map with Elasticsearch type replacements +# keys and values are in given in "{namespace}:{name}" format +TYPE_REPLACEMENTS = { + "_types.query_dsl:DistanceFeatureQuery": "_types.query_dsl:DistanceFeatureQueryBase", +} + + +def wrapped_doc(text, width=70, initial_indent="", subsequent_indent=""): + """Formats a docstring as a list of lines of up to the request width.""" + return textwrap.wrap( + text.replace("\n", " "), + width=width, + initial_indent=initial_indent, + subsequent_indent=subsequent_indent, + ) + + +def add_dict_type(type_): + """Add Dict[str, Any] to a Python type hint.""" + if type_.startswith("Union["): + type_ = f"{type_[:-1]}, Dict[str, Any]]" + else: + type_ = f"Union[{type_}, Dict[str, Any]]" + return type_ + + +def add_not_set(type_): + """Add DefaultType to a Python type hint.""" + if type_.startswith("Union["): + type_ = f'{type_[:-1]}, "DefaultType"]' + else: + type_ = f'Union[{type_}, "DefaultType"]' + return type_ + + +class ElasticsearchSchema: + """Operations related to the Elasticsearch schema.""" + + def __init__(self): + response = None + for branch in [f"{VERSION[0]}.{VERSION[1]}", "main"]: + url = f"https://raw.githubusercontent.com/elastic/elasticsearch-specification/{branch}/output/schema/schema.json" + try: + response = urlopen(url) + print(f"Initializing code generation with '{branch}' specification.") + break + except HTTPError: + continue + if not response: + raise RuntimeError("Could not download Elasticsearch schema") + self.schema = json.loads(response.read()) + + # Interfaces collects interfaces that are seen while traversing the schema. + # Any interfaces collected here are then rendered as Python in the + # types.py module. + self.interfaces = [] + + def find_type(self, name, namespace=None): + for t in self.schema["types"]: + if t["name"]["name"] == name and ( + namespace is None or t["name"]["namespace"] == namespace + ): + return t + + def get_python_type(self, schema_type): + """Obtain Python typing details for a given schema type + + This method returns a tuple. The first element is a string with the + Python type hint. The second element is a dictionary with Python DSL + specific typing details to be stored in the DslBase._param_defs + attribute (or None if the type does not need to be in _param_defs). 
+ """ + if schema_type["kind"] == "instance_of": + type_name = schema_type["type"] + if type_name["namespace"] in ["_types", "internal", "_builtins"]: + if type_name["name"] in ["integer", "uint", "long", "ulong"]: + return "int", None + elif type_name["name"] in ["number", "float", "double"]: + return "float", None + elif type_name["name"] == "string": + return "str", None + elif type_name["name"] == "boolean": + return "bool", None + elif type_name["name"] == "binary": + return "bytes", None + elif type_name["name"] == "null": + return "None", None + elif type_name["name"] == "Field": + return 'Union[str, "InstrumentedField"]', None + else: + # not an instance of a native type, so we get the type and try again + return self.get_python_type( + self.find_type(type_name["name"], type_name["namespace"]) + ) + elif ( + type_name["namespace"] == "_types.query_dsl" + and type_name["name"] == "QueryContainer" + ): + # QueryContainer maps to the DSL's Query class + return "Query", {"type": "query"} + else: + # for any other instances we get the type and recurse + type_ = self.find_type(type_name["name"], type_name["namespace"]) + if type_: + return self.get_python_type(type_) + + elif schema_type["kind"] == "type_alias": + # for an alias, we use the aliased type + return self.get_python_type(schema_type["type"]) + + elif schema_type["kind"] == "array_of": + # for arrays we use Sequence[element_type] + type_, param = self.get_python_type(schema_type["value"]) + return f"Sequence[{type_}]", {**param, "multi": True} if param else None + + elif schema_type["kind"] == "dictionary_of": + # for dicts we use Mapping[key_type, value_type] + key_type, key_param = self.get_python_type(schema_type["key"]) + value_type, value_param = self.get_python_type(schema_type["value"]) + return f"Mapping[{key_type}, {value_type}]", None + + elif schema_type["kind"] == "union_of": + if ( + len(schema_type["items"]) == 2 + and schema_type["items"][0]["kind"] == "instance_of" + and schema_type["items"][1]["kind"] == "array_of" + and schema_type["items"][0] == schema_type["items"][1]["value"] + ): + # special kind of unions in the form Union[type, Sequence[type]] + type_, param = self.get_python_type(schema_type["items"][0]) + return ( + f"Union[{type_}, Sequence[{type_}]]", + ({"type": param["type"], "multi": True} if param else None), + ) + elif ( + len(schema_type["items"]) == 2 + and schema_type["items"][0]["kind"] == "instance_of" + and schema_type["items"][1]["kind"] == "instance_of" + and schema_type["items"][0]["type"] + == {"name": "T", "namespace": "_spec_utils.PipeSeparatedFlags"} + and schema_type["items"][1]["type"] + == {"name": "string", "namespace": "_builtins"} + ): + # for now we treat PipeSeparatedFlags as a special case + if "PipeSeparatedFlags" not in self.interfaces: + self.interfaces.append("PipeSeparatedFlags") + return '"types.PipeSeparatedFlags"', None + else: + # generic union type + types = list( + dict.fromkeys( # eliminate duplicates + [self.get_python_type(t) for t in schema_type["items"]] + ) + ) + return "Union[" + ", ".join([type_ for type_, _ in types]) + "]", None + + elif schema_type["kind"] == "enum": + # enums are mapped to Literal[member, ...] 
+ return ( + "Literal[" + + ", ".join( + [f"\"{member['name']}\"" for member in schema_type["members"]] + ) + + "]", + None, + ) + + elif schema_type["kind"] == "interface": + if schema_type["name"]["namespace"] == "_types.query_dsl": + # handle specific DSL classes explicitly to map to existing + # Python DSL classes + if schema_type["name"]["name"].endswith("RangeQuery"): + return '"wrappers.Range[Any]"', None + elif schema_type["name"]["name"].endswith("ScoreFunction"): + # When dropping Python 3.8, use `removesuffix("Function")` instead + name = schema_type["name"]["name"][:-8] + return f'"function.{name}"', None + elif schema_type["name"]["name"].endswith("DecayFunction"): + return '"function.DecayFunction"', None + elif schema_type["name"]["name"].endswith("Function"): + return f"\"function.{schema_type['name']['name']}\"", None + elif schema_type["name"]["namespace"] == "_types.analysis" and schema_type[ + "name" + ]["name"].endswith("Analyzer"): + # not expanding analyzers at this time, maybe in the future + return "str, Dict[str, Any]", None + + # to handle other interfaces we generate a type of the same name + # and add the interface to the interfaces.py module + if schema_type["name"]["name"] not in self.interfaces: + self.interfaces.append(schema_type["name"]["name"]) + return f"\"types.{schema_type['name']['name']}\"", None + elif schema_type["kind"] == "user_defined_value": + # user_defined_value maps to Python's Any type + return "Any", None + + raise RuntimeError(f"Cannot find Python type for {schema_type}") + + def add_attribute(self, k, arg, for_types_py=False): + """Add an attribute to the internal representation of a class. + + This method adds the argument `arg` to the data structure for a class + stored in `k`. In particular, the argument is added to the `k["args"]` + list, making sure required arguments are first in the list. If the + argument is of a type that needs Python DSL specific typing details to + be stored in the DslBase._param_defs attribute, then this is added to + `k["params"]`. + + When `for_types_py` is `True`, type hints are formatted in the most + convenient way for the types.py file. When possible, double quotes are + removed from types, and for types that are in the same file the quotes + are kept to prevent forward references, but the "types." namespace is + removed. When `for_types_py` is `False`, all non-native types use + quotes and are namespaced. + """ + try: + type_, param = schema.get_python_type(arg["type"]) + except RuntimeError: + type_ = "Any" + param = None + if type_ != "Any": + if "types." in type_: + type_ = add_dict_type(type_) # interfaces can be given as dicts + type_ = add_not_set(type_) + if for_types_py: + type_ = type_.replace('"DefaultType"', "DefaultType") + type_ = type_.replace('"InstrumentedField"', "InstrumentedField") + type_ = re.sub(r'"(function\.[a-zA-Z0-9_]+)"', r"\1", type_) + type_ = re.sub(r'"types\.([a-zA-Z0-9_]+)"', r'"\1"', type_) + type_ = re.sub(r'"(wrappers\.[a-zA-Z0-9_]+)"', r"\1", type_) + required = "(required) " if arg["required"] else "" + server_default = ( + f" Defaults to `{arg['serverDefault']}` if omitted." 
+ if arg.get("serverDefault") + else "" + ) + doc = wrapped_doc( + f":arg {arg['name']}: {required}{arg.get('description', '')}{server_default}", + subsequent_indent=" ", + ) + arg = { + "name": PROP_REPLACEMENTS.get(arg["name"], arg["name"]), + "type": type_, + "doc": doc, + "required": arg["required"], + } + if param is not None: + param = {"name": arg["name"], "param": param} + if arg["required"]: + # insert in the right place so that all required arguments + # appear at the top of the argument list + i = 0 + for i in range(len(k["args"]) + 1): + if i == len(k["args"]): + break + if k["args"][i].get("positional"): + continue + if k["args"][i]["required"] is False: + break + k["args"].insert(i, arg) + else: + k["args"].append(arg) + if param and "params" in k: + k["params"].append(param) + + def property_to_python_class(self, p): + """Return a dictionary with template data necessary to render a schema + property as a Python class. + + Used for "container" sub-classes such as `QueryContainer`, where each + sub-class is represented by a Python DSL class. + + The format is as follows: + + ```python + { + "property_name": "the name of the property", + "name": "the class name to use for the property", + "docstring": "the formatted docstring as a list of strings", + "args": [ # a Python description of each class attribute + "name": "the name of the attribute", + "type": "the Python type hint for the attribute", + "doc": ["formatted lines of documentation to add to class docstring"], + "required": bool, + "positional": bool, + ], + "params": [ + "name": "the attribute name", + "param": "the param dictionary to include in `_param_defs` for the class", + ], # a DSL-specific description of interesting attributes + "is_single_field": bool # True for single-key dicts with field key + "is_multi_field": bool # True for multi-key dicts with field keys + } + ``` + """ + k = { + "property_name": p["name"], + "name": "".join([w.title() for w in p["name"].split("_")]), + } + k["docstring"] = wrapped_doc(p.get("description") or "") + kind = p["type"]["kind"] + if kind == "instance_of": + namespace = p["type"]["type"]["namespace"] + name = p["type"]["type"]["name"] + if f"{namespace}:{name}" in TYPE_REPLACEMENTS: + namespace, name = TYPE_REPLACEMENTS[f"{namespace}:{name}"].split(":") + type_ = schema.find_type(name, namespace) + if type_["kind"] == "interface": + k["args"] = [] + k["params"] = [] + if "behaviors" in type_: + for behavior in type_["behaviors"]: + if ( + behavior["type"]["name"] != "AdditionalProperty" + or behavior["type"]["namespace"] != "_spec_utils" + ): + # we do not support this behavior, so we ignore it + continue + key_type, _ = schema.get_python_type(behavior["generics"][0]) + if "InstrumentedField" in key_type: + value_type, _ = schema.get_python_type( + behavior["generics"][1] + ) + k["args"].append( + { + "name": "_field", + "type": add_not_set(key_type), + "doc": [ + ":arg _field: The field to use in this query." + ], + "required": False, + "positional": True, + } + ) + k["args"].append( + { + "name": "_value", + "type": add_not_set(add_dict_type(value_type)), + "doc": [ + ":arg _value: The query value for the field." + ], + "required": False, + "positional": True, + } + ) + k["is_single_field"] = True + else: + raise RuntimeError( + f"Non-field AdditionalProperty are not supported for interface {namespace}:{name}." 
+ ) + while True: + for arg in type_["properties"]: + self.add_attribute(k, arg) + if "inherits" in type_ and "type" in type_["inherits"]: + type_ = schema.find_type( + type_["inherits"]["type"]["name"], + type_["inherits"]["type"]["namespace"], + ) + else: + break + else: + raise RuntimeError( + f"Cannot generate code for instances of kind '{type_['kind']}'" + ) + + elif kind == "dictionary_of": + key_type, _ = schema.get_python_type(p["type"]["key"]) + if "InstrumentedField" in key_type: + value_type, _ = schema.get_python_type(p["type"]["value"]) + if p["type"]["singleKey"]: + # special handling for single-key dicts with field key + k["args"] = [ + { + "name": "_field", + "type": add_not_set(key_type), + "doc": [":arg _field: The field to use in this query."], + "required": False, + "positional": True, + }, + { + "name": "_value", + "type": add_not_set(add_dict_type(value_type)), + "doc": [":arg _value: The query value for the field."], + "required": False, + "positional": True, + }, + ] + k["is_single_field"] = True + else: + # special handling for multi-key dicts with field keys + k["args"] = [ + { + "name": "_fields", + "type": f"Optional[Mapping[{key_type}, {value_type}]]", + "doc": [ + ":arg _fields: A dictionary of fields with their values." + ], + "required": False, + "positional": True, + }, + ] + k["is_multi_field"] = True + else: + raise RuntimeError(f"Cannot generate code for type {p['type']}") + + else: + raise RuntimeError(f"Cannot generate code for type {p['type']}") + return k + + def interface_to_python_class(self, interface, interfaces): + """Return a dictionary with template data necessary to render an + interface a Python class. + + This is used to render interfaces that are referenced by container + classes. The current list of rendered interfaces is passed as a second + argument to allow this method to add more interfaces to it as they are + discovered. + + The returned format is as follows: + + ```python + { + "name": "the class name to use for the interface class", + "parent": "the parent class name", + "args": [ # a Python description of each class attribute + "name": "the name of the attribute", + "type": "the Python type hint for the attribute", + "doc": ["formatted lines of documentation to add to class docstring"], + "required": bool, + "positional": bool, + ], + } + ``` + """ + type_ = schema.find_type(interface) + if type_["kind"] != "interface": + raise RuntimeError(f"Type {interface} is not an interface") + k = {"name": interface, "args": []} + while True: + for arg in type_["properties"]: + schema.add_attribute(k, arg, for_types_py=True) + + if "inherits" not in type_ or "type" not in type_["inherits"]: + break + + if "parent" not in k: + k["parent"] = type_["inherits"]["type"]["name"] + if type_["inherits"]["type"]["name"] not in interfaces: + interfaces.append(type_["inherits"]["type"]["name"]) + type_ = schema.find_type( + type_["inherits"]["type"]["name"], + type_["inherits"]["type"]["namespace"], + ) + return k + + +def generate_query_py(schema, filename): + """Generate query.py with all the properties of `QueryContainer` as Python + classes. 
+ """ + classes = [] + query_container = schema.find_type("QueryContainer", "_types.query_dsl") + for p in query_container["properties"]: + classes.append(schema.property_to_python_class(p)) + + with open(filename, "wt") as f: + f.write(query_py.render(classes=classes, parent="Query")) + print(f"Generated {filename}.") + + +def generate_types_py(schema, filename): + """Generate types.py""" + classes = {} + schema.interfaces = sorted(schema.interfaces) + for interface in schema.interfaces: + if interface == "PipeSeparatedFlags": + continue # handled as a special case + k = schema.interface_to_python_class(interface, schema.interfaces) + classes[k["name"]] = k + + classes_list = [] + for n in classes: + k = classes[n] + if k in classes_list: + continue + classes_list.append(k) + parent = k.get("parent") + parent_index = len(classes_list) - 1 + while parent: + try: + classes_list.index(classes[parent]) + break + except ValueError: + pass + classes_list.insert(parent_index, classes[parent]) + parent = classes[parent].get("parent") + + with open(filename, "wt") as f: + f.write(types_py.render(classes=classes_list)) + print(f"Generated {filename}.") + + +if __name__ == "__main__": + schema = ElasticsearchSchema() + generate_query_py(schema, "elasticsearch_dsl/query.py") + generate_types_py(schema, "elasticsearch_dsl/types.py") diff --git a/utils/templates/query.py.tpl b/utils/templates/query.py.tpl new file mode 100644 index 00000000..23b23809 --- /dev/null +++ b/utils/templates/query.py.tpl @@ -0,0 +1,374 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import collections.abc +from copy import deepcopy +from itertools import chain +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Dict, + List, + Literal, + Mapping, + MutableMapping, + Optional, + Protocol, + Sequence, + TypeVar, + Union, + cast, + overload, +) + +from elastic_transport.client_utils import DEFAULT + +# 'SF' looks unused but the test suite assumes it's available +# from this module so others are liable to do so as well. +from .function import SF # noqa: F401 +from .function import ScoreFunction +from .utils import DslBase + +if TYPE_CHECKING: + from elastic_transport.client_utils import DefaultType + from .document_base import InstrumentedField + from elasticsearch_dsl import types, wrappers + +_T = TypeVar("_T") +_M = TypeVar("_M", bound=Mapping[str, Any]) + + +class QProxiedProtocol(Protocol[_T]): + _proxied: _T + + +@overload +def Q(name_or_query: MutableMapping[str, _M]) -> "Query": ... + + +@overload +def Q(name_or_query: "Query") -> "Query": ... + + +@overload +def Q(name_or_query: QProxiedProtocol[_T]) -> _T: ... + + +@overload +def Q(name_or_query: str = "match_all", **params: Any) -> "Query": ... 
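+
+# The overloads above only narrow Q()'s return type for static type checkers:
+# a dict, a Query instance, or a query name string all produce a "Query",
+# while an object exposing a `_proxied` attribute returns the proxied query.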
+ + +def Q( + name_or_query: Union[ + str, + "Query", + QProxiedProtocol[_T], + MutableMapping[str, _M], + ] = "match_all", + **params: Any, +) -> Union["Query", _T]: + # {"match": {"title": "python"}} + if isinstance(name_or_query, collections.abc.MutableMapping): + if params: + raise ValueError("Q() cannot accept parameters when passing in a dict.") + if len(name_or_query) != 1: + raise ValueError( + 'Q() can only accept dict with a single query ({"match": {...}}). ' + "Instead it got (%r)" % name_or_query + ) + name, q_params = deepcopy(name_or_query).popitem() + return Query.get_dsl_class(name)(_expand__to_dot=False, **q_params) + + # MatchAll() + if isinstance(name_or_query, Query): + if params: + raise ValueError( + "Q() cannot accept parameters when passing in a Query object." + ) + return name_or_query + + # s.query = Q('filtered', query=s.query) + if hasattr(name_or_query, "_proxied"): + return cast(QProxiedProtocol[_T], name_or_query)._proxied + + # "match", title="python" + return Query.get_dsl_class(name_or_query)(**params) + + +class Query(DslBase): + _type_name = "query" + _type_shortcut = staticmethod(Q) + name: ClassVar[Optional[str]] = None + + # Add type annotations for methods not defined in every subclass + __ror__: ClassVar[Callable[["Query", "Query"], "Query"]] + __radd__: ClassVar[Callable[["Query", "Query"], "Query"]] + __rand__: ClassVar[Callable[["Query", "Query"], "Query"]] + + def __add__(self, other: "Query") -> "Query": + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, "__radd__"): + return other.__radd__(self) + return Bool(must=[self, other]) + + def __invert__(self) -> "Query": + return Bool(must_not=[self]) + + def __or__(self, other: "Query") -> "Query": + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, "__ror__"): + return other.__ror__(self) + return Bool(should=[self, other]) + + def __and__(self, other: "Query") -> "Query": + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, "__rand__"): + return other.__rand__(self) + return Bool(must=[self, other]) + + +{% for k in classes %} +class {{ k.name }}({{ parent }}): + """ + {% for line in k.docstring %} + {{ line }} + {% endfor %} + {% if k.args %} + {% if k.docstring %} + + {% endif %} + {% for kwarg in k.args %} + {% for line in kwarg.doc %} + {{ line }} + {% endfor %} + {% endfor %} + {% endif %} + """ + name = "{{ k.property_name }}" + {% if k.params %} + _param_defs = { + {% for param in k.params %} + "{{ param.name }}": {{ param.param }}, + {% endfor %} + {% if k.name == "FunctionScore" %} + {# The FunctionScore class implements a custom solution for the `functions` + shortcut property. 
Until the code generator can support shortcut + properties directly that solution is added here #} + "filter": {"type": "query"}, + "functions": {"type": "score_function", "multi": True}, + {% endif %} + } + {% endif %} + + def __init__( + self, + {% for arg in k.args %} + {% if arg.positional %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endif %} + {% endfor %} + {% if k.args and not k.args[-1].positional %} + *, + {% endif %} + {% for arg in k.args %} + {% if not arg.positional %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endif %} + {% endfor %} + **kwargs: Any + ): + {% if k.name == "FunctionScore" %} + {# continuation of the FunctionScore shortcut property support from above #} + if functions is DEFAULT: + functions = [] + for name in ScoreFunction._classes: + if name in kwargs: + functions.append({name: kwargs.pop(name)}) # type: ignore + {% elif k.is_single_field %} + if _field is not DEFAULT: + kwargs[str(_field)] = _value + {% elif k.is_multi_field %} + if _fields is not DEFAULT: + for field, value in _fields.items(): + kwargs[str(field)] = value + {% endif %} + super().__init__( + {% for arg in k.args %} + {% if not arg.positional %} + {{ arg.name }}={{ arg.name }}, + {% endif %} + {% endfor %} + **kwargs + ) + + {# what follows is a set of Pythonic enhancements to some of the query classes + which are outside the scope of the code generator #} + {% if k.name == "MatchAll" %} + def __add__(self, other: "Query") -> "Query": + return other._clone() + + __and__ = __rand__ = __radd__ = __add__ + + def __or__(self, other: "Query") -> "MatchAll": + return self + + __ror__ = __or__ + + def __invert__(self) -> "MatchNone": + return MatchNone() + + +EMPTY_QUERY = MatchAll() + + {% elif k.name == "MatchNone" %} + def __add__(self, other: "Query") -> "MatchNone": + return self + + __and__ = __rand__ = __radd__ = __add__ + + def __or__(self, other: "Query") -> "Query": + return other._clone() + + __ror__ = __or__ + + def __invert__(self) -> MatchAll: + return MatchAll() + + {% elif k.name == "Bool" %} + def __add__(self, other: Query) -> "Bool": + q = self._clone() + if isinstance(other, Bool): + q.must += other.must + q.should += other.should + q.must_not += other.must_not + q.filter += other.filter + else: + q.must.append(other) + return q + + __radd__ = __add__ + + def __or__(self, other: Query) -> Query: + for q in (self, other): + if isinstance(q, Bool) and not any( + (q.must, q.must_not, q.filter, getattr(q, "minimum_should_match", None)) + ): + other = self if q is other else other + q = q._clone() + if isinstance(other, Bool) and not any( + ( + other.must, + other.must_not, + other.filter, + getattr(other, "minimum_should_match", None), + ) + ): + q.should.extend(other.should) + else: + q.should.append(other) + return q + + return Bool(should=[self, other]) + + __ror__ = __or__ + + @property + def _min_should_match(self) -> int: + return getattr( + self, + "minimum_should_match", + 0 if not self.should or (self.must or self.filter) else 1, + ) + + def __invert__(self) -> Query: + # Because an empty Bool query is treated like + # MatchAll the inverse should be MatchNone + if not any(chain(self.must, self.filter, self.should, self.must_not)): + return MatchNone() + + negations: List[Query] = [] + for q in chain(self.must, self.filter): + negations.append(~q) + + for q in self.must_not: + negations.append(q) + + if self.should and self._min_should_match: + negations.append(Bool(must_not=self.should[:])) + + if len(negations) == 1: + return negations[0] + return 
Bool(should=negations) + + def __and__(self, other: Query) -> Query: + q = self._clone() + if isinstance(other, Bool): + q.must += other.must + q.must_not += other.must_not + q.filter += other.filter + q.should = [] + + # reset minimum_should_match as it will get calculated below + if "minimum_should_match" in q._params: + del q._params["minimum_should_match"] + + for qx in (self, other): + min_should_match = qx._min_should_match + # TODO: percentages or negative numbers will fail here + # for now we report an error + if not isinstance(min_should_match, int) or min_should_match < 0: + raise ValueError( + "Can only combine queries with positive integer values for minimum_should_match" + ) + # all subqueries are required + if len(qx.should) <= min_should_match: + q.must.extend(qx.should) + # not all of them are required, use it and remember min_should_match + elif not q.should: + q.minimum_should_match = min_should_match + q.should = qx.should + # all queries are optional, just extend should + elif q._min_should_match == 0 and min_should_match == 0: + q.should.extend(qx.should) + # not all are required, add a should list to the must with proper min_should_match + else: + q.must.append( + Bool(should=qx.should, minimum_should_match=min_should_match) + ) + else: + if not (q.must or q.filter) and q.should: + q._params.setdefault("minimum_should_match", 1) + q.must.append(other) + return q + + __rand__ = __and__ + + {% elif k.name == "Terms" %} + def _setattr(self, name: str, value: Any) -> None: + # here we convert any iterables that are not strings to lists + if hasattr(value, "__iter__") and not isinstance(value, (str, list)): + value = list(value) + super()._setattr(name, value) + + {% endif %} + +{% endfor %} diff --git a/utils/templates/types.py.tpl b/utils/templates/types.py.tpl new file mode 100644 index 00000000..ea2a5c11 --- /dev/null +++ b/utils/templates/types.py.tpl @@ -0,0 +1,84 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
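+
+# This template renders elasticsearch_dsl/types.py: one AttrDict-based class
+# per interface collected by the generator, whose __init__ only forwards the
+# keyword arguments that were explicitly given (i.e. not left at DEFAULT).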
+ +from typing import Any, Dict, Literal, Mapping, Sequence, Union + +from elastic_transport.client_utils import DEFAULT, DefaultType + +from elasticsearch_dsl.document_base import InstrumentedField +from elasticsearch_dsl import function, Query +from elasticsearch_dsl.utils import AttrDict + +PipeSeparatedFlags = str + + +{% for k in classes %} +class {{ k.name }}({{ k.parent if k.parent else "AttrDict[Any]" }}): + {% if k.args %} + """ + {% for arg in k.args %} + {% for line in arg.doc %} + {{ line }} + {% endfor %} + {% endfor %} + """ + {% for arg in k.args %} + {{ arg.name }}: {{ arg.type }} + {% endfor %} + + def __init__( + self, + {% for arg in k.args %} + {% if arg.positional %}{{ arg.name }}: {{ arg.type }} = DEFAULT,{% endif %} + {% endfor %} + {% if k.args and not k.args[-1].positional %} + *, + {% endif %} + {% for arg in k.args %} + {% if not arg.positional %}{{ arg.name }}: {{ arg.type }} = DEFAULT,{% endif %} + {% endfor %} + **kwargs: Any + ): + {% if k.is_single_field %} + if _field is not DEFAULT: + kwargs[str(_field)] = _value + {% elif k.is_multi_field %} + if _fields is not DEFAULT: + for field, value in _fields.items(): + kwargs[str(field)] = value + {% endif %} + {% for arg in k.args %} + {% if not arg.positional %} + if {{ arg.name }} is not DEFAULT: + {% if "InstrumentedField" in arg.type %} + kwargs["{{ arg.name }}"] = str({{ arg.name }}) + {% else %} + kwargs["{{ arg.name }}"] = {{ arg.name }} + {% endif %} + {% endif %} + {% endfor %} + {% if k.parent %} + super().__init__(**kwargs) + {% else %} + super().__init__(kwargs) + {% endif %} + {% else %} + pass + {% endif %} + + +{% endfor %}
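
Usage sketch (illustrative, not part of the patch): running
"python utils/generator.py" regenerates elasticsearch_dsl/query.py and
elasticsearch_dsl/types.py, printing "Generated <filename>." for each file.
Assuming Term and Match are among the generated classes, single-field queries
accept the field and value positionally (the "is_single_field" branch above),
and Q() keeps resolving registered query names:

    from elasticsearch_dsl.query import Q, Bool, Term

    # positional _field/_value form generated for single-key field queries
    assert Term("user", "kimchy").to_dict() == {"term": {"user": "kimchy"}}

    # plain queries that define no __rand__ combine into Bool(must=[...])
    q = Q("match", title="python") & Q("match", title="django")
    assert isinstance(q, Bool)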