From 4e4fe9c3a99faaded41b6c08d98bf8eda6f3ea6b Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 24 Oct 2024 23:28:55 +1100 Subject: [PATCH 01/60] Mute org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT testDeploymentSurvivesRestart {cluster=UPGRADED} #115528 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 8b9c3cc6ce712..2d5349ed03b48 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT method: testFileSettingsReprocessedOnRestartWithoutVersionChange issue: https://github.com/elastic/elasticsearch/issues/115450 +- class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT + method: testDeploymentSurvivesRestart {cluster=UPGRADED} + issue: https://github.com/elastic/elasticsearch/issues/115528 # Examples: # From f774d0ee8249fef76182f76d401a97e217c53981 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 24 Oct 2024 14:58:37 +0200 Subject: [PATCH 02/60] Remove Delivery team as codeowners for gradle build scripts (#115523) --- .github/CODEOWNERS | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5b98444c044d2..540da14402192 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -39,7 +39,6 @@ gradle @elastic/es-delivery build-conventions @elastic/es-delivery build-tools @elastic/es-delivery build-tools-internal @elastic/es-delivery -*.gradle @elastic/es-delivery .buildkite @elastic/es-delivery .ci @elastic/es-delivery .idea @elastic/es-delivery From 889d2c346e4ab498875b1bb0aaaee88c54f4c1a2 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Thu, 24 Oct 2024 09:03:12 -0400 Subject: [PATCH 03/60] [ESQL] Enable "any type" aggregations on Date Nanos (#114438) Resolves #110002 Resolves #110003 Resolves #110005 Enable Values, Count, CountDistinct, Min and Max aggregations on date nanos. In the course of addressing this, I had to make some changes to AggregateMapper where it maps types into string names. I tried to refactor this once before (#110841) but at the time we decided not to go ahead with it. That bit me while working on this, and so I am trying again to refactor it. This time I've made a more localized change, just replacing the cascading if block with a switch. That will cause a compile time failure when future new data types are added, unless they correctly update this section. I've also done a small refactoring on the aggregators themselves, to make the supplier function consistent with the typeResolution. 
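To make the shape of that refactor concrete, here is a minimal, self-contained Java sketch of the dispatch-map pattern (the enum constants and supplier names echo the diffs below, but these simplified types are illustrative stand-ins, not the real ES classes):

    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;

    class SupplierDispatchSketch {
        enum DataType { BOOLEAN, INTEGER, LONG, DATETIME, DATE_NANOS, DOUBLE }

        interface AggregatorFunctionSupplier {}

        record BooleanSupplier(List<Integer> channels) implements AggregatorFunctionSupplier {}
        record IntSupplier(List<Integer> channels) implements AggregatorFunctionSupplier {}
        record LongSupplier(List<Integer> channels) implements AggregatorFunctionSupplier {}
        record DoubleSupplier(List<Integer> channels) implements AggregatorFunctionSupplier {}

        // One map replaces the cascading if/else chain. The same map backs both
        // type resolution (containsKey) and supplier construction (get), so the
        // two can no longer drift apart.
        static final Map<DataType, Function<List<Integer>, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries(
            Map.entry(DataType.BOOLEAN, BooleanSupplier::new),
            Map.entry(DataType.INTEGER, IntSupplier::new),
            Map.entry(DataType.LONG, LongSupplier::new),
            Map.entry(DataType.DATETIME, LongSupplier::new),   // datetimes are longs under the hood
            Map.entry(DataType.DATE_NANOS, LongSupplier::new), // ...and so are date nanos
            Map.entry(DataType.DOUBLE, DoubleSupplier::new)
        );

        static AggregatorFunctionSupplier supplier(DataType type, List<Integer> inputChannels) {
            if (SUPPLIERS.containsKey(type) == false) {
                // If type resolution did its job, this should never happen.
                throw new IllegalArgumentException("illegal agg type: " + type);
            }
            return SUPPLIERS.get(type).apply(inputChannels);
        }
    }

The point is that the type check (`SUPPLIERS::containsKey`) and the supplier lookup (`SUPPLIERS.get(type)`) now read from the same source of truth, which is what the production diffs below do for CountDistinct, Max, Min, and Values.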
--------- Co-authored-by: Elastic Machine --- .../src/main/resources/date_nanos.csv | 1 + .../src/main/resources/date_nanos.csv-spec | 31 ++++++++++++++ .../xpack/esql/action/EsqlCapabilities.java | 5 +++ .../function/aggregate/CountDistinct.java | 40 ++++++++++-------- .../expression/function/aggregate/Max.java | 42 +++++++++---------- .../expression/function/aggregate/Min.java | 42 +++++++++---------- .../expression/function/aggregate/Values.java | 38 +++++++++-------- .../xpack/esql/planner/AggregateMapper.java | 31 ++++++-------- 8 files changed, 131 insertions(+), 99 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv index 029c3baf3cbfb..26b6f055221a6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv @@ -6,5 +6,6 @@ millis:date,nanos:date_nanos,num:long 2023-10-23T13:33:34.937Z,2023-10-23T13:33:34.937193000Z,1698068014937193000 2023-10-23T12:27:28.948Z,2023-10-23T12:27:28.948000000Z,1698064048948000000 2023-10-23T12:15:03.360Z,2023-10-23T12:15:03.360103847Z,1698063303360103847 +2023-10-23T12:15:03.360Z,2023-10-23T12:15:03.360103847Z,1698063303360103847 1999-10-23T12:15:03.360Z,[2023-03-23T12:15:03.360103847Z, 2023-02-23T13:33:34.937193000Z, 2023-01-23T13:55:01.543123456Z], 0 1999-10-22T12:15:03.360Z,[2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z], 0 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec index 515e2c9c6587f..d0edc1f07d021 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec @@ -216,6 +216,7 @@ l:long 1698068014937193000 1698064048948000000 1698063303360103847 +1698063303360103847 ; long to date nanos, index version @@ -231,6 +232,7 @@ d:date_nanos 2023-10-23T13:33:34.937193000Z 2023-10-23T12:27:28.948000000Z 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360103847Z ; date_nanos to date nanos, index version @@ -246,6 +248,7 @@ d:date_nanos 2023-10-23T13:33:34.937193000Z 2023-10-23T12:27:28.948000000Z 2023-10-23T12:15:03.360103847Z +2023-10-23T12:15:03.360103847Z ; attempt to cast the result of a fold to date nanos @@ -331,3 +334,31 @@ a:date_nanos [2023-02-23T13:33:34.937193000Z, 2023-03-23T12:15:03.360103847Z] [2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z] ; + + +Max and Min of date nanos +required_capability: date_nanos_aggregations + +FROM date_nanos | STATS max = MAX(nanos), min = MIN(nanos); + +max:date_nanos | min:date_nanos +2023-10-23T13:55:01.543123456Z | 2023-01-23T13:55:01.543123456Z +; + +Count and count distinct of date nanos +required_capability: date_nanos_aggregations + +FROM date_nanos | WHERE millis > "2020-01-01" | STATS count = COUNT(nanos), count_distinct = COUNT_DISTINCT(nanos); + +count:long | count_distinct:long +8 | 7 +; + +Values aggregation on date nanos +required_capability: date_nanos_aggregations + +FROM date_nanos | WHERE millis > "2020-01-01" | STATS v = MV_SORT(VALUES(nanos), "DESC"); + +v:date_nanos +[2023-10-23T13:55:01.543123456Z, 2023-10-23T13:53:55.832987654Z, 2023-10-23T13:52:55.015787878Z, 2023-10-23T13:51:54.732102837Z, 2023-10-23T13:33:34.937193000Z, 2023-10-23T12:27:28.948000000Z, 2023-10-23T12:15:03.360103847Z] +; diff 
--git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index f22ad07a4c6f6..55236af648236 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -313,6 +313,11 @@ public enum Cap { */ LEAST_GREATEST_FOR_DATENANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + /** + * support aggregations on date nanos + */ + DATE_NANOS_AGGREGATIONS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + /** * Support for datetime in least and greatest functions */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java index 756000dfbb187..5ae162f1fbb12 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java @@ -38,6 +38,8 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; @@ -53,6 +55,20 @@ public class CountDistinct extends AggregateFunction implements OptionalArgument CountDistinct::new ); + private static final Map, Integer, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + // Booleans ignore the precision because there are only two possible values anyway + Map.entry(DataType.BOOLEAN, (inputChannels, precision) -> new CountDistinctBooleanAggregatorFunctionSupplier(inputChannels)), + Map.entry(DataType.LONG, CountDistinctLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, CountDistinctLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, CountDistinctLongAggregatorFunctionSupplier::new), + Map.entry(DataType.INTEGER, CountDistinctIntAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, CountDistinctDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.KEYWORD, CountDistinctBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, CountDistinctBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.VERSION, CountDistinctBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, CountDistinctBytesRefAggregatorFunctionSupplier::new) + ); + private static final int DEFAULT_PRECISION = 3000; private final Expression precision; @@ -102,7 +118,7 @@ public CountDistinct( Source source, @Param( name = "field", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "Column or literal for which to count the number of distinct values." 
) Expression field, @Param( @@ -179,7 +195,7 @@ protected TypeResolution resolveType() { .and( isType( field(), - dt -> dt != DataType.UNSIGNED_LONG && dt != DataType.SOURCE, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "any exact type except unsigned_long, _source, or counter types" @@ -196,23 +212,11 @@ protected TypeResolution resolveType() { public AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); int precision = this.precision == null ? DEFAULT_PRECISION : ((Number) this.precision.fold()).intValue(); - if (type == DataType.BOOLEAN) { - // Booleans ignore the precision because there are only two possible values anyway - return new CountDistinctBooleanAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DATETIME || type == DataType.LONG) { - return new CountDistinctLongAggregatorFunctionSupplier(inputChannels, precision); - } - if (type == DataType.INTEGER) { - return new CountDistinctIntAggregatorFunctionSupplier(inputChannels, precision); - } - if (type == DataType.DOUBLE) { - return new CountDistinctDoubleAggregatorFunctionSupplier(inputChannels, precision); - } - if (DataType.isString(type) || type == DataType.IP || type == DataType.VERSION) { - return new CountDistinctBytesRefAggregatorFunctionSupplier(inputChannels, precision); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels, precision); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index 6119b2ce58465..ee16193efdccc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -32,16 +32,28 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; -import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; -import static org.elasticsearch.xpack.esql.core.type.DataType.isRepresentable; -import static org.elasticsearch.xpack.esql.core.type.DataType.isSpatial; public class Max extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Max", Max::new); + private static final Map, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + Map.entry(DataType.BOOLEAN, MaxBooleanAggregatorFunctionSupplier::new), + Map.entry(DataType.LONG, MaxLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, MaxLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, MaxLongAggregatorFunctionSupplier::new), + Map.entry(DataType.INTEGER, MaxIntAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, MaxDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, MaxIpAggregatorFunctionSupplier::new), + Map.entry(DataType.KEYWORD, MaxBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, MaxBytesRefAggregatorFunctionSupplier::new), + 
Map.entry(DataType.VERSION, MaxBytesRefAggregatorFunctionSupplier::new) + ); + @FunctionInfo( returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, description = "The maximum value of a field.", @@ -98,7 +110,7 @@ public Max replaceChildren(List newChildren) { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - t -> isRepresentable(t) && t != UNSIGNED_LONG && isSpatial(t) == false, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "representable except unsigned_long and spatial types" @@ -113,25 +125,11 @@ public DataType dataType() { @Override public final AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); - if (type == DataType.BOOLEAN) { - return new MaxBooleanAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.LONG || type == DataType.DATETIME) { - return new MaxLongAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.INTEGER) { - return new MaxIntAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DOUBLE) { - return new MaxDoubleAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.IP) { - return new MaxIpAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.VERSION || DataType.isString(type)) { - return new MaxBytesRefAggregatorFunctionSupplier(inputChannels); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index a1492f79da393..7aaa41ea6ab11 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -32,16 +32,28 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; -import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; -import static org.elasticsearch.xpack.esql.core.type.DataType.isRepresentable; -import static org.elasticsearch.xpack.esql.core.type.DataType.isSpatial; public class Min extends AggregateFunction implements ToAggregator, SurrogateExpression { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Min", Min::new); + private static final Map, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + Map.entry(DataType.BOOLEAN, MinBooleanAggregatorFunctionSupplier::new), + Map.entry(DataType.LONG, MinLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, MinLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, MinLongAggregatorFunctionSupplier::new), + Map.entry(DataType.INTEGER, MinIntAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, MinDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, MinIpAggregatorFunctionSupplier::new), + Map.entry(DataType.VERSION, MinBytesRefAggregatorFunctionSupplier::new), + 
Map.entry(DataType.KEYWORD, MinBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, MinBytesRefAggregatorFunctionSupplier::new) + ); + @FunctionInfo( returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, description = "The minimum value of a field.", @@ -98,7 +110,7 @@ public Min withFilter(Expression filter) { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - t -> isRepresentable(t) && t != UNSIGNED_LONG && isSpatial(t) == false, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "representable except unsigned_long and spatial types" @@ -113,25 +125,11 @@ public DataType dataType() { @Override public final AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); - if (type == DataType.BOOLEAN) { - return new MinBooleanAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.LONG || type == DataType.DATETIME) { - return new MinLongAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.INTEGER) { - return new MinIntAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DOUBLE) { - return new MinDoubleAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.IP) { - return new MinIpAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.VERSION || DataType.isString(type)) { - return new MinBytesRefAggregatorFunctionSupplier(inputChannels); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java index a844b981c95d6..8d576839c3c5c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java @@ -29,14 +29,28 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.function.Function; import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.DEFAULT; -import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; public class Values extends AggregateFunction implements ToAggregator { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Values", Values::new); + private static final Map, AggregatorFunctionSupplier>> SUPPLIERS = Map.ofEntries( + Map.entry(DataType.INTEGER, ValuesIntAggregatorFunctionSupplier::new), + Map.entry(DataType.LONG, ValuesLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATETIME, ValuesLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DATE_NANOS, ValuesLongAggregatorFunctionSupplier::new), + Map.entry(DataType.DOUBLE, ValuesDoubleAggregatorFunctionSupplier::new), + Map.entry(DataType.KEYWORD, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.TEXT, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.IP, ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.VERSION, 
ValuesBytesRefAggregatorFunctionSupplier::new), + Map.entry(DataType.BOOLEAN, ValuesBooleanAggregatorFunctionSupplier::new) + ); + @FunctionInfo( returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, preview = true, @@ -98,7 +112,7 @@ public DataType dataType() { protected TypeResolution resolveType() { return TypeResolutions.isType( field(), - dt -> DataType.isSpatial(dt) == false && dt != UNSIGNED_LONG, + SUPPLIERS::containsKey, sourceText(), DEFAULT, "any type except unsigned_long and spatial types" @@ -108,22 +122,10 @@ protected TypeResolution resolveType() { @Override public AggregatorFunctionSupplier supplier(List inputChannels) { DataType type = field().dataType(); - if (type == DataType.INTEGER) { - return new ValuesIntAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.LONG || type == DataType.DATETIME) { - return new ValuesLongAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.DOUBLE) { - return new ValuesDoubleAggregatorFunctionSupplier(inputChannels); - } - if (DataType.isString(type) || type == DataType.IP || type == DataType.VERSION) { - return new ValuesBytesRefAggregatorFunctionSupplier(inputChannels); - } - if (type == DataType.BOOLEAN) { - return new ValuesBooleanAggregatorFunctionSupplier(inputChannels); + if (SUPPLIERS.containsKey(type) == false) { + // If the type checking did its job, this should never happen + throw EsqlIllegalArgumentException.illegalDataType(type); } - // TODO cartesian_point, geo_point - throw EsqlIllegalArgumentException.illegalDataType(type); + return SUPPLIERS.get(type).apply(inputChannels); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java index c322135198262..3e81c2a2c1101 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java @@ -297,25 +297,18 @@ private static String dataTypeToString(DataType type, Class aggClass) { if (aggClass == Top.class && type.equals(DataType.IP)) { return "Ip"; } - if (type.equals(DataType.BOOLEAN)) { - return "Boolean"; - } else if (type.equals(DataType.INTEGER) || type.equals(DataType.COUNTER_INTEGER)) { - return "Int"; - } else if (type.equals(DataType.LONG) || type.equals(DataType.DATETIME) || type.equals(DataType.COUNTER_LONG)) { - return "Long"; - } else if (type.equals(DataType.DOUBLE) || type.equals(DataType.COUNTER_DOUBLE)) { - return "Double"; - } else if (type.equals(DataType.KEYWORD) - || type.equals(DataType.IP) - || type.equals(DataType.VERSION) - || type.equals(DataType.TEXT)) { - return "BytesRef"; - } else if (type.equals(GEO_POINT)) { - return "GeoPoint"; - } else if (type.equals(CARTESIAN_POINT)) { - return "CartesianPoint"; - } else { + + return switch (type) { + case DataType.BOOLEAN -> "Boolean"; + case DataType.INTEGER, DataType.COUNTER_INTEGER -> "Int"; + case DataType.LONG, DataType.DATETIME, DataType.COUNTER_LONG, DataType.DATE_NANOS -> "Long"; + case DataType.DOUBLE, DataType.COUNTER_DOUBLE -> "Double"; + case DataType.KEYWORD, DataType.IP, DataType.VERSION, DataType.TEXT -> "BytesRef"; + case GEO_POINT -> "GeoPoint"; + case CARTESIAN_POINT -> "CartesianPoint"; + case SEMANTIC_TEXT, UNSUPPORTED, NULL, UNSIGNED_LONG, SHORT, BYTE, FLOAT, HALF_FLOAT, SCALED_FLOAT, OBJECT, SOURCE, DATE_PERIOD, + TIME_DURATION, 
CARTESIAN_SHAPE, GEO_SHAPE, DOC_DATA_TYPE, TSID_DATA_TYPE, PARTIAL_AGG -> throw new EsqlIllegalArgumentException("illegal agg type: " + type.typeName()); - } + }; } } From 28715b791a88de6b3f2ccb6b4f097a9881f01007 Mon Sep 17 00:00:00 2001 From: mspielberg <9729801+mspielberg@users.noreply.github.com> Date: Thu, 24 Oct 2024 06:06:39 -0700 Subject: [PATCH 04/60] Add documentation for minimum_should_match (#113043) --- .../reference/query-dsl/terms-set-query.asciidoc | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 2abfe54d53976..27717af3ac171 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -159,12 +159,22 @@ GET /job-candidates/_search `terms`:: + -- -(Required, array of strings) Array of terms you wish to find in the provided +(Required, array) Array of terms you wish to find in the provided ``. To return a document, a required number of terms must exactly match the field values, including whitespace and capitalization. -The required number of matching terms is defined in the -`minimum_should_match_field` or `minimum_should_match_script` parameter. +The required number of matching terms is defined in the `minimum_should_match`, +`minimum_should_match_field` or `minimum_should_match_script` parameters. Exactly +one of these parameters must be provided. +-- + +`minimum_should_match`:: ++ +-- +(Optional) Specification for the number of matching terms required to return +a document. + +For valid values, see <>. -- `minimum_should_match_field`:: From 6980fc62531923b68accc204fc25e7dea59760e3 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 15:11:10 +0200 Subject: [PATCH 05/60] [DOCS] Add text_expansion deprecation usage note (#115529) --- docs/reference/query-dsl/text-expansion-query.asciidoc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index 235a413df686f..5c7bce8c3fcf0 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -7,6 +7,13 @@ deprecated[8.15.0, This query has been replaced by <>.] +.Deprecation usage note +**** +You can continue using `rank_features` fields with `text_expansion` queries in the current version. +However, if you plan to upgrade, we recommend updating mappings to use the `sparse_vector` field type and <>. +This will allow you to take advantage of the new capabilities and improvements available in newer versions. +**** + The text expansion query uses a {nlp} model to convert the query text into a list of token-weight pairs which are then used in a query against a <> or <> field. From 833f2fb9185072b0f8edcd2576d512ff91810277 Mon Sep 17 00:00:00 2001 From: Stef Nestor <26751266+stefnestor@users.noreply.github.com> Date: Thu, 24 Oct 2024 07:27:23 -0600 Subject: [PATCH 06/60] (Doc+) link video for resolving max shards open (#115480) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 👋 howdy team! @anniegale9538 and my [video](https://www.youtube.com/watch?v=tZKbDegt4-M) demonstrates how to resolve `max shards open` errors as a common support ask. 
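For quick reference, the usual stopgap is raising the `cluster.max_shards_per_node` cluster setting (the docs section patched below shows this same API; the value here is only an example, and the durable fix is reducing shard count rather than raising the limit):

[source,console]
----
PUT _cluster/settings
{
  "persistent" : {
    "cluster.max_shards_per_node": 1200
  }
}
----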
--- docs/reference/how-to/size-your-shards.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 8770ec373bb18..86f195d030223 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -572,7 +572,7 @@ PUT _cluster/settings } ---- -For more information, see <>. +See this https://www.youtube.com/watch?v=tZKbDegt4-M[fixing "max shards open" video] for an example troubleshooting walkthrough. For more information, see <>. [discrete] [[troubleshooting-max-docs-limit]] From e99607b5895880d11b4981279314bcbb6b0fe3a9 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Thu, 24 Oct 2024 16:29:14 +0300 Subject: [PATCH 07/60] Adding breaking change entry for retrievers (#115399) --- docs/changelog/115399.yaml | 29 +++++++++++++++++++ .../TextSimilarityRankRetrieverBuilder.java | 2 +- .../xpack/rank/rrf/RRFRetrieverBuilder.java | 2 +- 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/115399.yaml diff --git a/docs/changelog/115399.yaml b/docs/changelog/115399.yaml new file mode 100644 index 0000000000000..9f69657a5d167 --- /dev/null +++ b/docs/changelog/115399.yaml @@ -0,0 +1,29 @@ +pr: 115399 +summary: Adding breaking change entry for retrievers +area: Search +type: breaking +issues: [] +breaking: + title: Reworking RRF retriever to be evaluated during rewrite phase + area: REST API + details: |- + In this release (8.16), we have introduced major changes to the retrievers framework + and how they can be evaluated, focusing mainly on compound retrievers + like `rrf` and `text_similarity_reranker`, which allowed us to support full + composability (i.e. any retriever can be nested under any compound retriever), + as well as supporting additional search features like collapsing, explaining, + aggregations, and highlighting. + + To ensure consistency, and given that this rework is not available until 8.16, + `rrf` and `text_similarity_reranker` retriever queries would now + throw an exception in a mixed cluster scenario, where there are nodes + both in current or later (i.e. >= 8.16) and previous ( <= 8.15) versions. + + As part of the rework, we have also removed the `_rank` property from + the responses of an `rrf` retriever. + impact: |- + - Users will not be able to use the `rrf` and `text_similarity_reranker` retrievers in a mixed cluster scenario + with previous releases (i.e. prior to 8.16), and the request will throw an `IllegalArgumentException`. 
+ - `_rank` has now been removed from the output of the `rrf` retrievers so trying to directly parse the field + will throw an exception + notable: false diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index 342199dc51db8..91b6cdc61afe4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -81,7 +81,7 @@ public static TextSimilarityRankRetrieverBuilder fromXContent(XContentParser par throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + TextSimilarityRankBuilder.NAME + "]"); } if (context.clusterSupportsFeature(TEXT_SIMILARITY_RERANKER_COMPOSITION_SUPPORTED) == false) { - throw new UnsupportedOperationException( + throw new IllegalArgumentException( "[text_similarity_reranker] retriever composition feature is not supported by all nodes in the cluster" ); } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java index c3c9f19cde6ef..792ff4eac3893 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java @@ -83,7 +83,7 @@ public static RRFRetrieverBuilder fromXContent(XContentParser parser, RetrieverP throw new ParsingException(parser.getTokenLocation(), "unknown retriever [" + NAME + "]"); } if (context.clusterSupportsFeature(RRF_RETRIEVER_COMPOSITION_SUPPORTED) == false) { - throw new UnsupportedOperationException("[rrf] retriever composition feature is not supported by all nodes in the cluster"); + throw new IllegalArgumentException("[rrf] retriever composition feature is not supported by all nodes in the cluster"); } if (RRFRankPlugin.RANK_RRF_FEATURE.check(XPackPlugin.getSharedLicenseState()) == false) { throw LicenseUtils.newComplianceException("Reciprocal Rank Fusion (RRF)"); From 28882e86b200e9dfef47e6615bfd993d35f17abd Mon Sep 17 00:00:00 2001 From: Alexey Ivanov Date: Thu, 24 Oct 2024 14:30:32 +0100 Subject: [PATCH 08/60] Report JVM stats for all memory pools (97046) (#115117) This fix allows reporting of all JVM memory pools sizes in JVM stats --- docs/changelog/115117.yaml | 6 ++++++ .../elasticsearch/monitor/jvm/GcNames.java | 15 +++++++++++++- .../elasticsearch/monitor/jvm/JvmStats.java | 5 +---- .../monitor/jvm/JvmStatsTests.java | 20 +++++++++++++++++-- 4 files changed, 39 insertions(+), 7 deletions(-) create mode 100644 docs/changelog/115117.yaml diff --git a/docs/changelog/115117.yaml b/docs/changelog/115117.yaml new file mode 100644 index 0000000000000..de2defcd46afd --- /dev/null +++ b/docs/changelog/115117.yaml @@ -0,0 +1,6 @@ +pr: 115117 +summary: Report JVM stats for all memory pools (97046) +area: Infra/Core +type: bug +issues: + - 97046 diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java b/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java index 9db8e8f414d5c..3494204c330c0 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java +++ 
b/server/src/main/java/org/elasticsearch/monitor/jvm/GcNames.java @@ -15,8 +15,14 @@ public class GcNames { public static final String OLD = "old"; public static final String SURVIVOR = "survivor"; + private GcNames() {} + /** - * Resolves the GC type by its memory pool name ({@link java.lang.management.MemoryPoolMXBean#getName()}. + * Resolves the memory area name by the memory pool name provided by {@link java.lang.management.MemoryPoolMXBean#getName()} + * + * @param poolName the name of the memory pool from {@link java.lang.management.MemoryPoolMXBean} + * @param defaultName the name to return if the pool name does not match any known memory area + * @return memory area name corresponding to the pool name or {@code defaultName} if no match is found */ public static String getByMemoryPoolName(String poolName, String defaultName) { if ("Eden Space".equals(poolName) @@ -40,6 +46,13 @@ public static String getByMemoryPoolName(String poolName, String defaultName) { return defaultName; } + /** + * Resolves the GC type by the GC name provided by {@link java.lang.management.GarbageCollectorMXBean#getName()} + * + * @param gcName the name of the GC from {@link java.lang.management.GarbageCollectorMXBean} + * @param defaultName the name to return if the GC name does not match any known GC type + * @return GC type corresponding to the GC name or {@code defaultName} if no match is found + */ public static String getByGcName(String gcName, String defaultName) { if ("Copy".equals(gcName) || "PS Scavenge".equals(gcName) || "ParNew".equals(gcName) || "G1 Young Generation".equals(gcName)) { return YOUNG; diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java index 0a2763474b8df..e6b109207fdf3 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -64,10 +64,7 @@ public static JvmStats jvmStats() { List pools = new ArrayList<>(); for (MemoryPoolMXBean memoryPoolMXBean : memoryPoolMXBeans) { try { - String name = GcNames.getByMemoryPoolName(memoryPoolMXBean.getName(), null); - if (name == null) { // if we can't resolve it, its not interesting.... 
(Per Gen, Code Cache) - continue; - } + String name = GcNames.getByMemoryPoolName(memoryPoolMXBean.getName(), memoryPoolMXBean.getName()); MemoryUsage usage = memoryPoolMXBean.getUsage(); MemoryUsage peakUsage = memoryPoolMXBean.getPeakUsage(); pools.add( diff --git a/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java b/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java index 12fa776dd7efd..28976d803ff53 100644 --- a/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/jvm/JvmStatsTests.java @@ -13,17 +13,22 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; public class JvmStatsTests extends ESTestCase { - public void testJvmStats() throws IOException { + public void testJvmStats() { JvmStats stats = JvmStats.jvmStats(); assertNotNull(stats); assertNotNull(stats.getUptime()); @@ -40,6 +45,17 @@ public void testJvmStats() throws IOException { assertNotNull(mem.getHeapUsedPercent()); assertThat(mem.getHeapUsedPercent(), anyOf(equalTo((short) -1), greaterThanOrEqualTo((short) 0))); + // Memory pools + Map memoryPools = StreamSupport.stream(stats.getMem().spliterator(), false) + .collect(Collectors.toMap(JvmStats.MemoryPool::getName, Function.identity())); + assertThat(memoryPools, hasKey(GcNames.YOUNG)); + assertThat(memoryPools, hasKey(GcNames.OLD)); + assertThat(memoryPools, hasKey("Metaspace")); + assertThat(memoryPools.keySet(), hasSize(greaterThan(3))); + for (JvmStats.MemoryPool memoryPool : memoryPools.values()) { + assertThat(memoryPool.getUsed().getBytes(), greaterThan(0L)); + } + // Threads JvmStats.Threads threads = stats.getThreads(); assertNotNull(threads); From 37c7137f39d13ce36785c0bed01f2f058da886f8 Mon Sep 17 00:00:00 2001 From: Gergely Kalapos Date: Thu, 24 Oct 2024 15:49:45 +0200 Subject: [PATCH 09/60] [otel-data] Add more kubernetes aliases (#115429) * Add more kubernetes aliases * Update docs/changelog/115429.yaml * Review feedback --------- Co-authored-by: Elastic Machine --- docs/changelog/115429.yaml | 5 ++ .../semconv-resource-to-ecs@mappings.yaml | 48 +++++++++++++++++++ .../rest-api-spec/test/20_logs_tests.yml | 37 ++++++++++++++ 3 files changed, 90 insertions(+) create mode 100644 docs/changelog/115429.yaml diff --git a/docs/changelog/115429.yaml b/docs/changelog/115429.yaml new file mode 100644 index 0000000000000..ddf3c69183000 --- /dev/null +++ b/docs/changelog/115429.yaml @@ -0,0 +1,5 @@ +pr: 115429 +summary: "[otel-data] Add more kubernetes aliases" +area: Data streams +type: bug +issues: [] diff --git a/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml b/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml index 6645e7d282520..eb5cd6d37af83 100644 --- a/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml +++ 
b/x-pack/plugin/otel-data/src/main/resources/component-templates/semconv-resource-to-ecs@mappings.yaml @@ -56,21 +56,45 @@ template: os.version: type: keyword ignore_above: 1024 + k8s.container.name: + type: keyword + ignore_above: 1024 + k8s.cronjob.name: + type: keyword + ignore_above: 1024 + k8s.daemonset.name: + type: keyword + ignore_above: 1024 k8s.deployment.name: type: keyword ignore_above: 1024 + k8s.job.name: + type: keyword + ignore_above: 1024 k8s.namespace.name: type: keyword ignore_above: 1024 + k8s.node.hostname: + type: keyword + ignore_above: 1024 k8s.node.name: type: keyword ignore_above: 1024 + k8s.node.uid: + type: keyword + ignore_above: 1024 k8s.pod.name: type: keyword ignore_above: 1024 k8s.pod.uid: type: keyword ignore_above: 1024 + k8s.replicaset.name: + type: keyword + ignore_above: 1024 + k8s.statefulset.name: + type: keyword + ignore_above: 1024 service.node.name: type: alias path: resource.attributes.service.instance.id @@ -122,6 +146,30 @@ template: kubernetes.pod.uid: type: alias path: resource.attributes.k8s.pod.uid + kubernetes.container.name: + type: alias + path: resource.attributes.k8s.container.name + kubernetes.cronjob.name: + type: alias + path: resource.attributes.k8s.cronjob.name + kubernetes.job.name: + type: alias + path: resource.attributes.k8s.job.name + kubernetes.statefulset.name: + type: alias + path: resource.attributes.k8s.statefulset.name + kubernetes.daemonset.name: + type: alias + path: resource.attributes.k8s.daemonset.name + kubernetes.replicaset.name: + type: alias + path: resource.attributes.k8s.replicaset.name + kubernetes.node.uid: + type: alias + path: resource.attributes.k8s.node.uid + kubernetes.node.hostname: + type: alias + path: resource.attributes.k8s.node.hostname # Below are non-ECS fields that may be used by Kibana. 
service.language.name: type: alias diff --git a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml index 6bc0cee78be4f..63966e601a3cb 100644 --- a/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml +++ b/x-pack/plugin/otel-data/src/yamlRestTest/resources/rest-api-spec/test/20_logs_tests.yml @@ -187,3 +187,40 @@ host.name pass-through: - length: { hits.hits: 1 } - match: { hits.hits.0.fields.resource\.attributes\.host\.name: [ "localhost" ] } - match: { hits.hits.0.fields.host\.name: [ "localhost" ] } +--- +"kubernetes.* -> resource.attributes.k8s.* aliases": + - do: + bulk: + index: logs-generic.otel-default + refresh: true + body: + - create: { } + - "@timestamp": 2024-07-18T14:48:33.467654000Z + data_stream: + dataset: generic.otel + namespace: default + resource: + attributes: + k8s.container.name: myContainerName + k8s.cronjob.name: myCronJobName + k8s.job.name: myJobName + k8s.statefulset.name: myStatefulsetName + k8s.daemonset.name: myDaemonsetName + k8s.replicaset.name: myReplicasetName + k8s.node.uid: myNodeUid + k8s.node.hostname: myNodeHostname + - is_false: errors + - do: + search: + index: logs-generic.otel-default + body: + fields: ["kubernetes.container.name", "kubernetes.cronjob.name", "kubernetes.job.name", "kubernetes.statefulset.name", "kubernetes.daemonset.name", "kubernetes.replicaset.name", "kubernetes.node.uid", "kubernetes.node.hostname" ] + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields.kubernetes\.container\.name : ["myContainerName"] } + - match: { hits.hits.0.fields.kubernetes\.cronjob\.name : ["myCronJobName"] } + - match: { hits.hits.0.fields.kubernetes\.job\.name : ["myJobName"] } + - match: { hits.hits.0.fields.kubernetes\.statefulset\.name : ["myStatefulsetName"] } + - match: { hits.hits.0.fields.kubernetes\.daemonset\.name : ["myDaemonsetName"] } + - match: { hits.hits.0.fields.kubernetes\.replicaset\.name : ["myReplicasetName"] } + - match: { hits.hits.0.fields.kubernetes\.node\.uid : ["myNodeUid"] } + - match: { hits.hits.0.fields.kubernetes\.node\.hostname : ["myNodeHostname"] } From 31ede8fd284a79e1f62088d9800e59701f42b79a Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 15:57:49 +0200 Subject: [PATCH 10/60] Update 8.12.0.asciidoc (#115303) (#115546) Fixing confusing format Co-authored-by: Johannes Mahne --- docs/reference/release-notes/8.12.0.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/release-notes/8.12.0.asciidoc b/docs/reference/release-notes/8.12.0.asciidoc index bfa99401f41a2..bd0ae032ef0b9 100644 --- a/docs/reference/release-notes/8.12.0.asciidoc +++ b/docs/reference/release-notes/8.12.0.asciidoc @@ -11,7 +11,7 @@ Also see <>. + When using `int8_hnsw` and the default `confidence_interval` (or any `confidence_interval` less than `1.0`) and when there are deleted documents in the segments, quantiles may fail to build and prevent merging. - ++ This issue is fixed in 8.12.1. 
* When upgrading clusters from version 8.11.4 or earlier, if your cluster contains non-master-eligible nodes, From aae3b3499a7e397bbd2f2cd7df0e218ec3f12caf Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 00:57:55 +1100 Subject: [PATCH 11/60] Mute org.elasticsearch.test.apmintegration.MetricsApmIT testApmIntegration #115415 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 2d5349ed03b48..1ee677b14fea1 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -285,6 +285,9 @@ tests: - class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT method: testDeploymentSurvivesRestart {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/115528 +- class: org.elasticsearch.test.apmintegration.MetricsApmIT + method: testApmIntegration + issue: https://github.com/elastic/elasticsearch/issues/115415 # Examples: # From fffb98ac6c68cc633afbb855f697d514f4185c9b Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 24 Oct 2024 15:12:41 +0100 Subject: [PATCH 12/60] [ML] Set max allocations to 32 in default configs (#115518) --- .../services/elasticsearch/ElasticsearchInternalService.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 6732e5719b897..a0235f74ce511 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -859,7 +859,7 @@ private List defaultConfigs(boolean useLinuxOptimizedModel) { null, 1, useLinuxOptimizedModel ? ELSER_V2_MODEL_LINUX_X86 : ELSER_V2_MODEL, - new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 8) + new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 32) ), ElserMlNodeTaskSettings.DEFAULT, null // default chunking settings @@ -872,7 +872,7 @@ private List defaultConfigs(boolean useLinuxOptimizedModel) { null, 1, useLinuxOptimizedModel ? 
MULTILINGUAL_E5_SMALL_MODEL_ID_LINUX_X86 : MULTILINGUAL_E5_SMALL_MODEL_ID, - new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 8) + new AdaptiveAllocationsSettings(Boolean.TRUE, 0, 32) ), null // default chunking settings ); From 7d829fa51a13b2150ce7c0a08e3f5f66c9ee8bfb Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 24 Oct 2024 15:14:29 +0100 Subject: [PATCH 13/60] [ML] Prevent NPE if model assignment is removed while waiting to start (#115430) --- docs/changelog/115430.yaml | 5 +++++ .../action/TransportStartTrainedModelDeploymentAction.java | 6 +++++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115430.yaml diff --git a/docs/changelog/115430.yaml b/docs/changelog/115430.yaml new file mode 100644 index 0000000000000..c2903f7751012 --- /dev/null +++ b/docs/changelog/115430.yaml @@ -0,0 +1,5 @@ +pr: 115430 +summary: Prevent NPE if model assignment is removed while waiting to start +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index 0bda2de2ce9ae..5fd70ce71cd24 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -671,7 +671,11 @@ public boolean test(ClusterState clusterState) { deploymentId ).orElse(null); if (trainedModelAssignment == null) { - // Something weird happened, it should NEVER be null... + // The assignment may be null if it was stopped by another action while waiting + this.exception = new ElasticsearchStatusException( + "Error waiting for the model deployment to start. 
The trained model assignment was removed while waiting", + RestStatus.BAD_REQUEST + ); logger.trace(() -> format("[%s] assignment was null while waiting for state [%s]", deploymentId, waitForState)); return true; } From 755c392bb22e9046ef79982aba188f3c45193c8b Mon Sep 17 00:00:00 2001 From: Luke Whiting Date: Thu, 24 Oct 2024 15:24:26 +0100 Subject: [PATCH 14/60] Fix for race condition in interval watcher scheduler tests (#115501) --- muted-tests.yml | 12 ------------ .../schedule/engine/TickerScheduleEngineTests.java | 12 ++++-------- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 1ee677b14fea1..ba816ed5f3a9e 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -258,21 +258,9 @@ tests: - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)} issue: https://github.com/elastic/elasticsearch/issues/115231 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115339 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testWatchWithLastCheckedTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115354 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testAddWithLastCheckedTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115356 - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultE5 issue: https://github.com/elastic/elasticsearch/issues/115361 -- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests - method: testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval - issue: https://github.com/elastic/elasticsearch/issues/115368 - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests method: testProcessFileChanges issue: https://github.com/elastic/elasticsearch/issues/115280 diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java index 9a12b8f394eb2..ef290628c06d5 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java @@ -312,14 +312,13 @@ public void testWatchWithLastCheckedTimeExecutesBeforeInitialInterval() throws E engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } }); @@ -375,14 +374,13 @@ public void testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInit engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } 
}); @@ -428,14 +426,13 @@ public void testAddWithLastCheckedTimeExecutesBeforeInitialInterval() throws Exc engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } }); @@ -492,14 +489,13 @@ public void testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitia engine.register(events -> { for (TriggerEvent ignored : events) { - if (runCount.get() == 0) { + if (runCount.getAndIncrement() == 0) { logger.info("job first fire"); firstLatch.countDown(); } else { logger.info("job second fire"); secondLatch.countDown(); } - runCount.incrementAndGet(); } }); From d7a9575d0314adcc65b50c4972c585464c2aefa9 Mon Sep 17 00:00:00 2001 From: Pete Gillin Date: Thu, 24 Oct 2024 15:58:24 +0100 Subject: [PATCH 15/60] Remove deprecated local parameter from alias APIs (#115393) This removes the `local` parameter from the `GET /_alias`, `HEAD /_alias`, and `GET /_cat/aliases` APIs. This option became a no-op and was deprecated in 8.12 by https://github.com/elastic/elasticsearch/pull/101815. We continue to accept the parameter (deprecated, with no other effect) in v8 compatibility mode for `GET /_alias` and `HEAD /_alias`. We don't do this for `GET /_cat/aliases` where the [compatibility policy does not apply](https://github.com/elastic/elasticsearch/blob/main/REST_API_COMPATIBILITY.md#when-not-to-apply). --- docs/changelog/115393.yaml | 18 ++++++++++++ docs/reference/cat/alias.asciidoc | 10 +++---- docs/reference/indices/alias-exists.asciidoc | 2 -- docs/reference/indices/get-alias.asciidoc | 2 -- rest-api-spec/build.gradle | 1 + .../rest-api-spec/api/cat.aliases.json | 4 --- .../api/indices.exists_alias.json | 4 --- .../rest-api-spec/api/indices.get_alias.json | 4 --- .../test/cat.aliases/10_basic.yml | 13 --------- .../test/indices.exists_alias/10_basic.yml | 14 --------- .../test/indices.get_alias/10_basic.yml | 29 ------------------- .../admin/indices/RestGetAliasesAction.java | 15 +++++----- .../rest/action/cat/RestAliasAction.java | 22 -------------- 13 files changed, 30 insertions(+), 108 deletions(-) create mode 100644 docs/changelog/115393.yaml diff --git a/docs/changelog/115393.yaml b/docs/changelog/115393.yaml new file mode 100644 index 0000000000000..5cf4e5f64ab34 --- /dev/null +++ b/docs/changelog/115393.yaml @@ -0,0 +1,18 @@ +pr: 115393 +summary: Remove deprecated local attribute from alias APIs +area: Indices APIs +type: breaking +issues: [] +breaking: + title: Remove deprecated local attribute from alias APIs + area: REST API + details: >- + The following APIs no longer accept the `?local` query parameter: + `GET /_alias`, `GET /_aliases`, `GET /_alias/{name}`, + `HEAD /_alias/{name}`, `GET /{index}/_alias`, `HEAD /{index}/_alias`, + `GET /{index}/_alias/{name}`, `HEAD /{index}/_alias/{name}`, + `GET /_cat/aliases`, and `GET /_cat/aliases/{alias}`. This parameter + has been deprecated and ignored since version 8.12. + impact: >- + Cease usage of the `?local` query parameter when calling the listed APIs. + notable: false diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index 72f949bf11e50..41ac279d3b2f5 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -6,8 +6,8 @@ [IMPORTANT] ==== -cat APIs are only intended for human consumption using the command line or the -{kib} console. 
They are _not_ intended for use by applications. For application +cat APIs are only intended for human consumption using the command line or the +{kib} console. They are _not_ intended for use by applications. For application consumption, use the <>. ==== @@ -45,8 +45,6 @@ include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-h] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=help] -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] - include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-s] include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=cat-v] @@ -104,6 +102,6 @@ alias4 test1 - 2 1,2 - This response shows that `alias2` has configured a filter, and specific routing configurations in `alias3` and `alias4`. -If you only want to get information about specific aliases, you can specify -the aliases in comma-delimited format as a URL parameter, e.g., +If you only want to get information about specific aliases, you can specify +the aliases in comma-delimited format as a URL parameter, e.g., /_cat/aliases/alias1,alias2. diff --git a/docs/reference/indices/alias-exists.asciidoc b/docs/reference/indices/alias-exists.asciidoc index f820a95028a0f..d7b3454dcff56 100644 --- a/docs/reference/indices/alias-exists.asciidoc +++ b/docs/reference/indices/alias-exists.asciidoc @@ -52,8 +52,6 @@ Defaults to `all`. (Optional, Boolean) If `false`, requests that include a missing data stream or index in the `` return an error. Defaults to `false`. -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] - [[alias-exists-api-response-codes]] ==== {api-response-codes-title} diff --git a/docs/reference/indices/get-alias.asciidoc b/docs/reference/indices/get-alias.asciidoc index 743aaf7aee174..41d62fb70e01b 100644 --- a/docs/reference/indices/get-alias.asciidoc +++ b/docs/reference/indices/get-alias.asciidoc @@ -58,5 +58,3 @@ Defaults to `all`. `ignore_unavailable`:: (Optional, Boolean) If `false`, requests that include a missing data stream or index in the `` return an error. Defaults to `false`. - -include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=local] diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 4bd293f0a8641..6cc2028bffa39 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -60,4 +60,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("indices.sort/10_basic/Index Sort", "warning does not exist for compatibility") task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") + task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") }) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json index db49daeea372b..d3856b455efd1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json @@ -36,10 +36,6 @@ "type":"string", "description":"a short version of the Accept header, e.g. 
json, yaml" }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" - }, "h":{ "type":"list", "description":"Comma-separated list of column names to display" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json index b70854fdc3eb2..7d7a9c96c6419 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json @@ -61,10 +61,6 @@ ], "default":"all", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json index 0a4e4bb9ed90c..dc02a65adb068 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_alias.json @@ -79,10 +79,6 @@ ], "default": "all", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" } } } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 2e5234bd1ced1..6118453d7805e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -484,16 +484,3 @@ test_alias \s+ test_index\n my_alias \s+ test_index\n $/ - ---- -"Deprecated local parameter": - - requires: - cluster_features: ["gte_v8.12.0"] - test_runner_features: ["warnings"] - reason: verifying deprecation warnings from 8.12.0 onwards - - - do: - cat.aliases: - local: true - warnings: - - "the [?local=true] query parameter to cat-aliases requests has no effect and will be removed in a future version" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml index bf499de8463bd..a4223c2a983be 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml @@ -34,17 +34,3 @@ name: test_alias - is_false: '' - ---- -"Test indices.exists_alias with local flag": - - skip: - features: ["allowed_warnings"] - - - do: - indices.exists_alias: - name: test_alias - local: true - allowed_warnings: - - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" - - - is_false: '' diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 4f26a69712e83..63ab40f3bf578 100644 --- 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -289,21 +289,6 @@ setup: index: non-existent name: foo ---- -"Get alias with local flag": - - skip: - features: ["allowed_warnings"] - - - do: - indices.get_alias: - local: true - allowed_warnings: - - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" - - - is_true: test_index - - - is_true: test_index_2 - --- "Get alias against closed indices": - skip: @@ -329,17 +314,3 @@ setup: - is_true: test_index - is_false: test_index_2 - - ---- -"Deprecated local parameter": - - requires: - cluster_features: "gte_v8.12.0" - test_runner_features: ["warnings"] - reason: verifying deprecation warnings from 8.12.0 onwards - - - do: - indices.get_alias: - local: true - warnings: - - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 7780ae08ac0ff..dfe501f29ce2e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.core.UpdateForV9; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -52,7 +52,7 @@ @ServerlessScope(Scope.PUBLIC) public class RestGetAliasesAction extends BaseRestHandler { - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) // reject the deprecated ?local parameter + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) // remove the BWC support for the deprecated ?local parameter private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestGetAliasesAction.class); @Override @@ -199,8 +199,7 @@ static RestResponse buildRestResponse( } @Override - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) - // v7 REST API no longer exists: eliminate ref to RestApiVersion.V_7; reject local parameter in v9 too? + @UpdateForV10(owner = UpdateForV10.Owner.DATA_MANAGEMENT) // remove the BWC support for the deprecated ?local parameter public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { // The TransportGetAliasesAction was improved do the same post processing as is happening here. // We can't remove this logic yet to support mixed clusters. 
We should be able to remove this logic here @@ -213,10 +212,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getAliasesRequest.indices(indices); getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions())); - if (request.hasParam("local")) { - // consume this param just for validation - final var localParam = request.paramAsBoolean("local", false); - if (request.getRestApiVersion() != RestApiVersion.V_7) { + if (request.getRestApiVersion() == RestApiVersion.V_8) { + if (request.hasParam("local")) { + // consume this param just for validation when in BWC mode for V_8 + final var localParam = request.paramAsBoolean("local", false); DEPRECATION_LOGGER.critical( DeprecationCategory.API, "get-aliases-local", diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java index 191746b421c98..6aa0b1c865682 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java @@ -15,10 +15,6 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; -import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.Scope; @@ -34,8 +30,6 @@ @ServerlessScope(Scope.PUBLIC) public class RestAliasAction extends AbstractCatAction { - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestAliasAction.class); - @Override public List routes() { return List.of(new Route(GET, "/_cat/aliases"), new Route(GET, "/_cat/aliases/{alias}")); @@ -52,27 +46,11 @@ public boolean allowSystemIndexAccessByDefault() { } @Override - @UpdateForV9(owner = UpdateForV9.Owner.DATA_MANAGEMENT) - // v7 REST API no longer exists: eliminate ref to RestApiVersion.V_7; reject local parameter in v9 too? protected RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final GetAliasesRequest getAliasesRequest = request.hasParam("alias") ? 
new GetAliasesRequest(Strings.commaDelimitedListToStringArray(request.param("alias")))
            : new GetAliasesRequest();
         getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions()));
-
-        if (request.hasParam("local")) {
-            // consume this param just for validation
-            final var localParam = request.paramAsBoolean("local", false);
-            if (request.getRestApiVersion() != RestApiVersion.V_7) {
-                DEPRECATION_LOGGER.critical(
-                    DeprecationCategory.API,
-                    "cat-aliases-local",
-                    "the [?local={}] query parameter to cat-aliases requests has no effect and will be removed in a future version",
-                    localParam
-                );
-            }
-        }
-
         return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin()
             .indices()
             .getAliases(getAliasesRequest, new RestResponseListener<>(channel) {

From d8a3fc22cde255dc9b7456ba1009bb8b45b7407d Mon Sep 17 00:00:00 2001
From: Artem Prigoda 
Date: Thu, 24 Oct 2024 16:59:10 +0200
Subject: [PATCH 16/60] [test] Don't test any 7.x snapshots in
 `testLogicallyEquivalentSnapshotIsUsedEvenIfFilesAreDifferent` (#114821)

Don't test any 7.x snapshots, keep using any 8.x compatible snapshot and
Lucene version.

Originally added in 8.0 (#77420) for testing peer recoveries using
snapshots.

Co-authored-by: Yang Wang 
Co-authored-by: Elastic Machine 
---
 .../SnapshotsRecoveryPlannerServiceTests.java | 20 ++-----------------
 1 file changed, 2 insertions(+), 18 deletions(-)

diff --git a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java
index b082698254d17..6e7f2d82cfb1d 100644
--- a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java
+++ b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java
@@ -26,15 +26,12 @@
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.CheckedConsumer;
 import org.elasticsearch.core.IOUtils;
-import org.elasticsearch.core.UpdateForV9;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexVersion;
-import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.store.StoreFileMetadata;
-import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.indices.recovery.plan.ShardRecoveryPlan;
 import org.elasticsearch.indices.recovery.plan.ShardSnapshot;
 import org.elasticsearch.indices.recovery.plan.ShardSnapshotsService;
@@ -63,7 +60,6 @@
 import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList;
 import static org.elasticsearch.index.engine.Engine.ES_VERSION;
 import static org.elasticsearch.index.engine.Engine.HISTORY_UUID_KEY;
-import static org.elasticsearch.test.index.IndexVersionUtils.randomVersionBetween;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -203,8 +199,6 @@ public void fetchLatestSnapshotsForShard(ShardId shardId, ActionListener {
            boolean shareFilesWithSource =
randomBoolean(); @@ -217,18 +211,8 @@ public void testLogicallyEquivalentSnapshotIsUsedEvenIfFilesAreDifferent() throw final IndexVersion snapshotVersion; final Version luceneVersion; if (compatibleVersion) { - snapshotVersion = randomBoolean() ? null : IndexVersionUtils.randomCompatibleVersion(random()); - // If snapshotVersion is not present, - // then lucene version must be < RecoverySettings.SEQ_NO_SNAPSHOT_RECOVERIES_SUPPORTED_VERSION - if (snapshotVersion == null) { - luceneVersion = randomVersionBetween( - random(), - IndexVersions.V_7_0_0, - RecoverySettings.SNAPSHOT_RECOVERIES_SUPPORTED_INDEX_VERSION - ).luceneVersion(); - } else { - luceneVersion = IndexVersionUtils.randomCompatibleVersion(random()).luceneVersion(); - } + snapshotVersion = IndexVersionUtils.randomCompatibleVersion(random()); + luceneVersion = snapshotVersion.luceneVersion(); } else { snapshotVersion = IndexVersion.fromId(Integer.MAX_VALUE); luceneVersion = org.apache.lucene.util.Version.parse("255.255.255"); From f5d3c7c3d8bff7b91430c42d66550613e2716387 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 17:09:34 +0200 Subject: [PATCH 17/60] Remove legacy join validation transport protocol (#114571) We introduced a new join validation protocol in #85380 (8.3), the legacy protocol can be removed in 9.0 Remove assertion that we run a version after 8.3.0 --- .../coordination/JoinValidationService.java | 57 ++----------------- .../coordination/ValidateJoinRequest.java | 21 ++----- 2 files changed, 12 insertions(+), 66 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java index 34d59c9860aba..7de7fd4d92d1b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java @@ -13,7 +13,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.ClusterState; @@ -31,7 +30,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.Environment; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; @@ -46,7 +44,6 @@ import java.io.IOException; import java.util.Collection; import java.util.HashMap; -import java.util.Locale; import java.util.Map; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; @@ -162,55 +159,14 @@ public void validateJoin(DiscoveryNode discoveryNode, ActionListener liste return; } - if (connection.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { - if (executeRefs.tryIncRef()) { - try { - execute(new JoinValidation(discoveryNode, connection, listener)); - } finally { - executeRefs.decRef(); - } - } else { - listener.onFailure(new NodeClosedException(transportService.getLocalNode())); + if (executeRefs.tryIncRef()) { + try { + execute(new JoinValidation(discoveryNode, connection, listener)); + } finally { + executeRefs.decRef(); } } else { - legacyValidateJoin(discoveryNode, listener, connection); - } - } - - @UpdateForV9(owner = 
UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - private void legacyValidateJoin(DiscoveryNode discoveryNode, ActionListener listener, Transport.Connection connection) { - final var responseHandler = TransportResponseHandler.empty(responseExecutor, listener.delegateResponse((l, e) -> { - logger.warn(() -> "failed to validate incoming join request from node [" + discoveryNode + "]", e); - listener.onFailure( - new IllegalStateException( - String.format( - Locale.ROOT, - "failure when sending a join validation request from [%s] to [%s]", - transportService.getLocalNode().descriptionWithoutAttributes(), - discoveryNode.descriptionWithoutAttributes() - ), - e - ) - ); - })); - final var clusterState = clusterStateSupplier.get(); - if (clusterState != null) { - assert clusterState.nodes().isLocalNodeElectedMaster(); - transportService.sendRequest( - connection, - JOIN_VALIDATE_ACTION_NAME, - new ValidateJoinRequest(clusterState), - REQUEST_OPTIONS, - responseHandler - ); - } else { - transportService.sendRequest( - connection, - JoinHelper.JOIN_PING_ACTION_NAME, - new JoinHelper.JoinPingRequest(), - REQUEST_OPTIONS, - responseHandler - ); + listener.onFailure(new NodeClosedException(transportService.getLocalNode())); } } @@ -341,7 +297,6 @@ private class JoinValidation extends ActionRunnable { @Override protected void doRun() { - assert connection.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) : discoveryNode.getVersion(); // NB these things never run concurrently to each other, or to the cache cleaner (see IMPLEMENTATION NOTES above) so it is safe // to do these (non-atomic) things to the (unsynchronized) statesByVersion map. var transportVersion = connection.getTransportVersion(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java index 1d99f28e62582..c81e4877196b3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.coordination; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.bytes.BytesReference; @@ -29,19 +28,12 @@ public class ValidateJoinRequest extends TransportRequest { public ValidateJoinRequest(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { - // recent versions send a BytesTransportRequest containing a compressed representation of the state - final var bytes = in.readReleasableBytesReference(); - final var version = in.getTransportVersion(); - final var namedWriteableRegistry = in.namedWriteableRegistry(); - this.stateSupplier = () -> readCompressed(version, bytes, namedWriteableRegistry); - this.refCounted = bytes; - } else { - // older versions just contain the bare state - final var state = ClusterState.readFrom(in, null); - this.stateSupplier = () -> state; - this.refCounted = null; - } + // recent versions send a BytesTransportRequest containing a compressed representation of the state + final var bytes = in.readReleasableBytesReference(); + final var version = in.getTransportVersion(); + final var namedWriteableRegistry = in.namedWriteableRegistry(); + this.stateSupplier = () -> readCompressed(version, bytes, 
namedWriteableRegistry);
+        this.refCounted = bytes;
     }
 
     private static ClusterState readCompressed(
@@ -68,7 +60,6 @@ public ValidateJoinRequest(ClusterState state) {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        assert out.getTransportVersion().before(TransportVersions.V_8_3_0);
         super.writeTo(out);
         stateSupplier.get().writeTo(out);
     }

From 2ddd08aff7bea3a4ef1e4aea28d2ae63518902a1 Mon Sep 17 00:00:00 2001
From: Keith Massey 
Date: Thu, 24 Oct 2024 10:10:36 -0500
Subject: [PATCH 18/60] Fixing ingest simulate yaml rest test when there is a
 global legacy template (#115559)

The ingest simulate yaml rest test `Test mapping addition works with
indices without templates` tests what happens when an index has a
mapping but matches no template at all. However, randomly and rarely a
global match-all legacy template is applied to the cluster. When this
happens, the assumptions for the test fail since the index matches a
template. This PR removes that global legacy template so that the test
works as intended.

Closes #115412
Closes #115472
---
 muted-tests.yml                                               | 3 ---
 .../rest-api-spec/test/ingest/80_ingest_simulate.yml          | 7 +++++++
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index ba816ed5f3a9e..8c90f73f475e6 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -264,9 +264,6 @@ tests:
 - class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests
   method: testProcessFileChanges
   issue: https://github.com/elastic/elasticsearch/issues/115280
-- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT
-  method: test {yaml=ingest/80_ingest_simulate/Test mapping addition works with legacy templates}
-  issue: https://github.com/elastic/elasticsearch/issues/115412
 - class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT
   method: testFileSettingsReprocessedOnRestartWithoutVersionChange
   issue: https://github.com/elastic/elasticsearch/issues/115450
diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml
index 4d1a62c6f179e..7ed5ad3154151 100644
--- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml
+++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml
@@ -1586,6 +1586,13 @@ setup:
       cluster_features: ["simulate.support.non.template.mapping"]
       reason: "ingest simulate support for indices with mappings that didn't come from templates added in 8.17"
 
+  # A global match-everything legacy template is added to the cluster sometimes (rarely). We have to get rid of this template if it exists
+  # because this test is making sure we get correct behavior when an index matches *no* template:
+  - do:
+      indices.delete_template:
+        name: '*'
+        ignore: 404
+
   # First, make sure that validation fails before we create the index (since we are only defining the bar field but trying to index a value
  # for foo.
- do: From 79be69a5f87da015e6105a84537c590ae68c197b Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 24 Oct 2024 18:22:13 +0300 Subject: [PATCH 19/60] Ignore _field_names warning in testRollupAfterRestart (#115563) --- .../org/elasticsearch/xpack/restart/FullClusterRestartIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index c57e5653d1279..a56ddaabe8280 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -435,6 +435,7 @@ public void testRollupAfterRestart() throws Exception { final Request bulkRequest = new Request("POST", "/_bulk"); bulkRequest.setJsonEntity(bulk.toString()); + bulkRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(fieldNamesFieldOk())); client().performRequest(bulkRequest); // create the rollup job From fb6c729858b443956ba41c68495a5de084ffa73d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 24 Oct 2024 08:47:52 -0700 Subject: [PATCH 20/60] Guard blob store local directory creation with doPrivileged (#115459) The blob store may be triggered to create a local directory while in a reduced privilege context. This commit guards the creation of directories with doPrivileged. --- docs/changelog/115459.yaml | 5 +++++ .../common/blobstore/fs/FsBlobStore.java | 15 ++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/115459.yaml diff --git a/docs/changelog/115459.yaml b/docs/changelog/115459.yaml new file mode 100644 index 0000000000000..b20a8f765c084 --- /dev/null +++ b/docs/changelog/115459.yaml @@ -0,0 +1,5 @@ +pr: 115459 +summary: Guard blob store local directory creation with `doPrivileged` +area: Infra/Core +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index c4240672239fa..53e3b4b4796dc 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -19,6 +19,8 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.Iterator; import java.util.List; @@ -56,11 +58,14 @@ public int bufferSizeInBytes() { public BlobContainer blobContainer(BlobPath path) { Path f = buildPath(path); if (readOnly == false) { - try { - Files.createDirectories(f); - } catch (IOException ex) { - throw new ElasticsearchException("failed to create blob container", ex); - } + AccessController.doPrivileged((PrivilegedAction) () -> { + try { + Files.createDirectories(f); + } catch (IOException ex) { + throw new ElasticsearchException("failed to create blob container", ex); + } + return null; + }); } return new FsBlobContainer(this, path, f); } From 482d2aced5f888d548a755e0fe20fc6f83125d11 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Thu, 24 Oct 2024 17:58:36 +0200 Subject: [PATCH 21/60] Remove unused elasticsearch cloud docker image (#115357) --- .../gradle/internal/DockerBase.java 
| 3 --- distribution/docker/build.gradle | 25 +++---------------- .../cloud-docker-aarch64-export/build.gradle | 2 -- .../docker/cloud-docker-export/build.gradle | 2 -- .../build.gradle | 2 -- .../wolfi-ess-docker-export/build.gradle | 2 -- .../packaging/test/DockerTests.java | 11 +++----- .../test/KeystoreManagementTests.java | 5 +--- .../packaging/test/PackagingTestCase.java | 6 ++--- .../packaging/util/Distribution.java | 5 +--- .../packaging/util/docker/Docker.java | 2 +- .../packaging/util/docker/DockerRun.java | 1 - settings.gradle | 2 -- 13 files changed, 12 insertions(+), 56 deletions(-) delete mode 100644 distribution/docker/cloud-docker-aarch64-export/build.gradle delete mode 100644 distribution/docker/cloud-docker-export/build.gradle delete mode 100644 distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle delete mode 100644 distribution/docker/wolfi-ess-docker-export/build.gradle diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java index fb52daf7e164f..0535f0bdc3cc8 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java @@ -21,9 +21,6 @@ public enum DockerBase { // The Iron Bank base image is UBI (albeit hardened), but we are required to parameterize the Docker build IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank", "yum"), - // Base image with extras for Cloud - CLOUD("ubuntu:20.04", "-cloud", "apt-get"), - // Chainguard based wolfi image with latest jdk // This is usually updated via renovatebot // spotless:off diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index e40ac68bbacf4..788e836f8f045 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -288,20 +288,6 @@ void addBuildDockerContextTask(Architecture architecture, DockerBase base) { } } - if (base == DockerBase.CLOUD) { - // If we're performing a release build, but `build.id` hasn't been set, we can - // infer that we're not at the Docker building stage of the build, and therefore - // we should skip the beats part of the build. - String buildId = providers.systemProperty('build.id').getOrNull() - boolean includeBeats = VersionProperties.isElasticsearchSnapshot() == true || buildId != null || useDra - - if (includeBeats) { - from configurations.getByName("filebeat_${architecture.classifier}") - from configurations.getByName("metricbeat_${architecture.classifier}") - } - // For some reason, the artifact name can differ depending on what repository we used. 
-      rename ~/((?:file|metric)beat)-.*\.tar\.gz$/, "\$1-${VersionProperties.elasticsearch}.tar.gz"
-    }
 
   Provider serviceProvider = GradleUtils.getBuildService(
     project.gradle.sharedServices,
     DockerSupportPlugin.DOCKER_SUPPORT_SERVICE_NAME
@@ -381,7 +367,7 @@ private static List generateTags(DockerBase base, Architecture architect
   String image = "elasticsearch${base.suffix}"
 
   String namespace = 'elasticsearch'
-  if (base == DockerBase.CLOUD || base == DockerBase.CLOUD_ESS) {
+  if (base == DockerBase.CLOUD_ESS) {
     namespace += '-ci'
   }
@@ -439,7 +425,7 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) {
     }
   }
 
-  if (base != DockerBase.IRON_BANK && base != DockerBase.CLOUD && base != DockerBase.CLOUD_ESS) {
+  if (base != DockerBase.IRON_BANK && base != DockerBase.CLOUD_ESS) {
     tasks.named("assemble").configure {
       dependsOn(buildDockerImageTask)
     }
@@ -548,10 +534,6 @@ subprojects { Project subProject ->
       base = DockerBase.IRON_BANK
     } else if (subProject.name.contains('cloud-ess-')) {
       base = DockerBase.CLOUD_ESS
-    } else if (subProject.name.contains('cloud-')) {
-      base = DockerBase.CLOUD
-    } else if (subProject.name.contains('wolfi-ess')) {
-      base = DockerBase.WOLFI_ESS
     } else if (subProject.name.contains('wolfi-')) {
       base = DockerBase.WOLFI
     }
@@ -559,10 +541,9 @@ subprojects { Project subProject ->
     final String arch = architecture == Architecture.AARCH64 ? '-aarch64' : ''
     final String extension = base == DockerBase.UBI ? 'ubi.tar' :
       (base == DockerBase.IRON_BANK ? 'ironbank.tar' :
-        (base == DockerBase.CLOUD ? 'cloud.tar' :
           (base == DockerBase.CLOUD_ESS ? 'cloud-ess.tar' :
             (base == DockerBase.WOLFI ? 'wolfi.tar' :
-              'docker.tar'))))
+              'docker.tar')))
     final String artifactName = "elasticsearch${arch}${base.suffix}_test"
 
     final String exportTaskName = taskName("export", architecture, base, 'DockerImage')
diff --git a/distribution/docker/cloud-docker-aarch64-export/build.gradle b/distribution/docker/cloud-docker-aarch64-export/build.gradle
deleted file mode 100644
index 537b5a093683e..0000000000000
--- a/distribution/docker/cloud-docker-aarch64-export/build.gradle
+++ /dev/null
@@ -1,2 +0,0 @@
-// This file is intentionally blank. All configuration of the
-// export is done in the parent project.
diff --git a/distribution/docker/cloud-docker-export/build.gradle b/distribution/docker/cloud-docker-export/build.gradle
deleted file mode 100644
index 537b5a093683e..0000000000000
--- a/distribution/docker/cloud-docker-export/build.gradle
+++ /dev/null
@@ -1,2 +0,0 @@
-// This file is intentionally blank. All configuration of the
-// export is done in the parent project.
diff --git a/distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle b/distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle
deleted file mode 100644
index 537b5a093683e..0000000000000
--- a/distribution/docker/wolfi-ess-docker-aarch64-export/build.gradle
+++ /dev/null
@@ -1,2 +0,0 @@
-// This file is intentionally blank. All configuration of the
-// export is done in the parent project.
diff --git a/distribution/docker/wolfi-ess-docker-export/build.gradle b/distribution/docker/wolfi-ess-docker-export/build.gradle
deleted file mode 100644
index 537b5a093683e..0000000000000
--- a/distribution/docker/wolfi-ess-docker-export/build.gradle
+++ /dev/null
@@ -1,2 +0,0 @@
-// This file is intentionally blank. All configuration of the
-// export is done in the parent project.
diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index 4ca97bff42333..8cb8354eb5d71 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -169,10 +169,7 @@ public void test012SecurityCanBeDisabled() throws Exception { * Checks that no plugins are initially active. */ public void test020PluginsListWithNoPlugins() { - assumeTrue( - "Only applies to non-Cloud images", - distribution.packaging != Packaging.DOCKER_CLOUD && distribution().packaging != Packaging.DOCKER_CLOUD_ESS - ); + assumeTrue("Only applies to non-Cloud images", distribution().packaging != Packaging.DOCKER_CLOUD_ESS); final Installation.Executables bin = installation.executables(); final Result r = sh.run(bin.pluginTool + " list"); @@ -1116,8 +1113,8 @@ public void test170DefaultShellIsBash() { */ public void test171AdditionalCliOptionsAreForwarded() throws Exception { assumeTrue( - "Does not apply to Cloud and Cloud ESS images, because they don't use the default entrypoint", - distribution.packaging != Packaging.DOCKER_CLOUD && distribution().packaging != Packaging.DOCKER_CLOUD_ESS + "Does not apply to Cloud ESS images, because they don't use the default entrypoint", + distribution().packaging != Packaging.DOCKER_CLOUD_ESS ); runContainer(distribution(), builder().runArgs("bin/elasticsearch", "-Ecluster.name=kimchy").envVar("ELASTIC_PASSWORD", PASSWORD)); @@ -1204,7 +1201,7 @@ public void test310IronBankImageHasNoAdditionalLabels() throws Exception { * Check that the Cloud image contains the required Beats */ public void test400CloudImageBundlesBeats() { - assumeTrue(distribution.packaging == Packaging.DOCKER_CLOUD || distribution.packaging == Packaging.DOCKER_CLOUD_ESS); + assumeTrue(distribution.packaging == Packaging.DOCKER_CLOUD_ESS); final List contents = listContents("/opt"); assertThat("Expected beats in /opt", contents, hasItems("filebeat", "metricbeat")); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java index a988a446f561f..02e1ce35764cf 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/KeystoreManagementTests.java @@ -436,10 +436,7 @@ private void verifyKeystorePermissions() { switch (distribution.packaging) { case TAR, ZIP -> assertThat(keystore, file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660)); case DEB, RPM -> assertThat(keystore, file(File, "root", "elasticsearch", p660)); - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> assertThat( - keystore, - DockerFileMatcher.file(p660) - ); + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> assertThat(keystore, DockerFileMatcher.file(p660)); default -> throw new IllegalStateException("Unknown Elasticsearch packaging type."); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index 644990105f60f..b4a00ca56924a 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ 
b/qa/packaging/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -245,7 +245,7 @@ protected static void install() throws Exception { installation = Packages.installPackage(sh, distribution); Packages.verifyPackageInstallation(installation, distribution, sh); } - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> { + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> { installation = Docker.runContainer(distribution); Docker.verifyContainerInstallation(installation); } @@ -335,7 +335,6 @@ public Shell.Result runElasticsearchStartCommand(String password, boolean daemon case DOCKER: case DOCKER_UBI: case DOCKER_IRON_BANK: - case DOCKER_CLOUD: case DOCKER_CLOUD_ESS: case DOCKER_WOLFI: // nothing, "installing" docker image is running it @@ -358,7 +357,6 @@ public void stopElasticsearch() throws Exception { case DOCKER: case DOCKER_UBI: case DOCKER_IRON_BANK: - case DOCKER_CLOUD: case DOCKER_CLOUD_ESS: case DOCKER_WOLFI: // nothing, "installing" docker image is running it @@ -373,7 +371,7 @@ public void awaitElasticsearchStartup(Shell.Result result) throws Exception { switch (distribution.packaging) { case TAR, ZIP -> Archives.assertElasticsearchStarted(installation); case DEB, RPM -> Packages.assertElasticsearchStarted(sh, installation); - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> Docker.waitForElasticsearchToStart(); + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> Docker.waitForElasticsearchToStart(); default -> throw new IllegalStateException("Unknown Elasticsearch packaging type."); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java index 05cef4a0818ba..11b8324384631 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/Distribution.java @@ -33,8 +33,6 @@ public Distribution(Path path) { this.packaging = Packaging.DOCKER_UBI; } else if (filename.endsWith(".ironbank.tar")) { this.packaging = Packaging.DOCKER_IRON_BANK; - } else if (filename.endsWith(".cloud.tar")) { - this.packaging = Packaging.DOCKER_CLOUD; } else if (filename.endsWith(".cloud-ess.tar")) { this.packaging = Packaging.DOCKER_CLOUD_ESS; } else if (filename.endsWith(".wolfi.tar")) { @@ -63,7 +61,7 @@ public boolean isPackage() { */ public boolean isDocker() { return switch (packaging) { - case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> true; + case DOCKER, DOCKER_UBI, DOCKER_IRON_BANK, DOCKER_CLOUD_ESS, DOCKER_WOLFI -> true; default -> false; }; } @@ -77,7 +75,6 @@ public enum Packaging { DOCKER(".docker.tar", Platforms.isDocker()), DOCKER_UBI(".ubi.tar", Platforms.isDocker()), DOCKER_IRON_BANK(".ironbank.tar", Platforms.isDocker()), - DOCKER_CLOUD(".cloud.tar", Platforms.isDocker()), DOCKER_CLOUD_ESS(".cloud-ess.tar", Platforms.isDocker()), DOCKER_WOLFI(".wolfi.tar", Platforms.isDocker()); diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java index c38eaa58f0552..0cd2823080b9b 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/Docker.java @@ -532,7 +532,7 @@ public 
static void verifyContainerInstallation(Installation es) { ) ); - if (es.distribution.packaging == Packaging.DOCKER_CLOUD || es.distribution.packaging == Packaging.DOCKER_CLOUD_ESS) { + if (es.distribution.packaging == Packaging.DOCKER_CLOUD_ESS) { verifyCloudContainerInstallation(es); } } diff --git a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java index e562e7591564e..e3eac23d3ecce 100644 --- a/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java +++ b/qa/packaging/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java @@ -165,7 +165,6 @@ public static String getImageName(Distribution distribution) { case DOCKER -> ""; case DOCKER_UBI -> "-ubi"; case DOCKER_IRON_BANK -> "-ironbank"; - case DOCKER_CLOUD -> "-cloud"; case DOCKER_CLOUD_ESS -> "-cloud-ess"; case DOCKER_WOLFI -> "-wolfi"; default -> throw new IllegalStateException("Unexpected distribution packaging type: " + distribution.packaging); diff --git a/settings.gradle b/settings.gradle index a95a46a3569d7..39453e8d0935a 100644 --- a/settings.gradle +++ b/settings.gradle @@ -63,8 +63,6 @@ List projects = [ 'distribution:archives:linux-aarch64-tar', 'distribution:archives:linux-tar', 'distribution:docker', - 'distribution:docker:cloud-docker-export', - 'distribution:docker:cloud-docker-aarch64-export', 'distribution:docker:cloud-ess-docker-export', 'distribution:docker:cloud-ess-docker-aarch64-export', 'distribution:docker:docker-aarch64-export', From d500daf2e16bb3b6fb4bdde49bbf9d93b7fec25b Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Thu, 24 Oct 2024 18:02:11 +0200 Subject: [PATCH 22/60] [DOCS][101] Add BYO vectors ingestion tutorial (#115112) --- docs/reference/images/semantic-options.svg | 62 ++++++++ .../search-your-data/ingest-vectors.asciidoc | 141 ++++++++++++++++++ .../search-your-data/semantic-search.asciidoc | 3 + 3 files changed, 206 insertions(+) create mode 100644 docs/reference/images/semantic-options.svg create mode 100644 docs/reference/search/search-your-data/ingest-vectors.asciidoc diff --git a/docs/reference/images/semantic-options.svg b/docs/reference/images/semantic-options.svg new file mode 100644 index 0000000000000..3bedf5307357e --- /dev/null +++ b/docs/reference/images/semantic-options.svg @@ -0,0 +1,62 @@ + + + + Elasticsearch semantic search workflows + + + + + + semantic_text + (Recommended) + + + + Inference API + + + + Model Deployment + + + Complexity: Low + Complexity: Medium + Complexity: High + + + + + + Create Inference Endpoint + + + Define Index Mapping + + + + Create Inference Endpoint + + + Configure Model Settings + + + Define Index Mapping + + + Setup Ingest Pipeline + + + + Select NLP Model + + + Deploy with Eland Client + + + Define Index Mapping + + + Setup Ingest Pipeline + + + diff --git a/docs/reference/search/search-your-data/ingest-vectors.asciidoc b/docs/reference/search/search-your-data/ingest-vectors.asciidoc new file mode 100644 index 0000000000000..f288293d2b03a --- /dev/null +++ b/docs/reference/search/search-your-data/ingest-vectors.asciidoc @@ -0,0 +1,141 @@ +[[bring-your-own-vectors]] +=== Bring your own dense vector embeddings to {es} +++++ +Bring your own dense vectors +++++ + +This tutorial demonstrates how to index documents that already have dense vector embeddings into {es}. +You'll also learn the syntax for searching these documents using a `knn` query. 
+ +You'll find links at the end of this tutorial for more information about deploying a text embedding model in {es}, so you can generate embeddings for queries on the fly. + +[TIP] +==== +This is an advanced use case. +Refer to <> for an overview of your options for semantic search with {es}. +==== + +[discrete] +[[bring-your-own-vectors-create-index]] +=== Step 1: Create an index with `dense_vector` mapping + +Each document in our simple dataset will have: + +* A review: stored in a `review_text` field +* An embedding of that review: stored in a `review_vector` field +** The `review_vector` field is defined as a <> data type. + +[TIP] +==== +The `dense_vector` type automatically uses `int8_hnsw` quantization by default to reduce the memory footprint required when searching float vectors. +Learn more about balancing performance and accuracy in <>. +==== + +[source,console] +---- +PUT /amazon-reviews +{ + "mappings": { + "properties": { + "review_vector": { + "type": "dense_vector", + "dims": 8, <1> + "index": true, <2> + "similarity": "cosine" <3> + }, + "review_text": { + "type": "text" + } + } + } +} +---- +// TEST SETUP +<1> The `dims` parameter must match the length of the embedding vector. Here we're using a simple 8-dimensional embedding for readability. If not specified, `dims` will be dynamically calculated based on the first indexed document. +<2> The `index` parameter is set to `true` to enable the use of the `knn` query. +<3> The `similarity` parameter defines the similarity function used to compare the query vector to the document vectors. `cosine` is the default similarity function for `dense_vector` fields in {es}. + +[discrete] +[[bring-your-own-vectors-index-documents]] +=== Step 2: Index documents with embeddings + +[discrete] +==== Index a single document + +First, index a single document to understand the document structure. + +[source,console] +---- +PUT /amazon-reviews/_doc/1 +{ + "review_text": "This product is lifechanging! I'm telling all my friends about it.", + "review_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] <1> +} +---- +// TEST +<1> The size of the `review_vector` array is 8, matching the `dims` count specified in the mapping. + +[discrete] +==== Bulk index multiple documents + +In a production scenario, you'll want to index many documents at once using the <>. + +Here's an example of indexing multiple documents in a single `_bulk` request. + +[source,console] +---- +POST /_bulk +{ "index": { "_index": "amazon-reviews", "_id": "2" } } +{ "review_text": "This product is amazing! I love it.", "review_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] } +{ "index": { "_index": "amazon-reviews", "_id": "3" } } +{ "review_text": "This product is terrible. I hate it.", "review_vector": [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] } +{ "index": { "_index": "amazon-reviews", "_id": "4" } } +{ "review_text": "This product is great. I can do anything with it.", "review_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8] } +{ "index": { "_index": "amazon-reviews", "_id": "5" } } +{ "review_text": "This product has ruined my life and the lives of my family and friends.", "review_vector": [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1] } +---- +// TEST[continued] + +[discrete] +[[bring-your-own-vectors-search-documents]] +=== Step 3: Search documents with embeddings + +Now you can query these document vectors using a <>. +`knn` is a type of vector search, which finds the `k` most similar documents to a query vector. 
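+
+[NOTE]
+====
+As a minimal sketch of an alternative syntax (assuming the `amazon-reviews` index created above and the same raw query vector as the example below), this search can also be written with the top-level `knn` search option instead of a retriever:
+
+[source,console]
+----
+POST /amazon-reviews/_search
+{
+  "knn": {
+    "field": "review_vector",
+    "query_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
+    "k": 2,
+    "num_candidates": 5
+  }
+}
+----
+====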
+Here we're simply using a raw vector for the query text, for demonstration purposes. + +[source,console] +---- +POST /amazon-reviews/_search +{ + "retriever": { + "knn": { + "field": "review_vector", + "query_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8], <1> + "k": 2, <2> + "num_candidates": 5 <3> + } + } +} +---- +// TEST[skip:flakeyknnerror] +<1> In this simple example, we're sending a raw vector as the query text. In a real-world scenario, you'll need to generate vectors for queries using an embedding model. +<2> The `k` parameter specifies the number of results to return. +<3> The `num_candidates` parameter is optional. It limits the number of candidates returned by the search node. This can improve performance and reduce costs. + +[discrete] +[[bring-your-own-vectors-learn-more]] +=== Learn more + +In this simple example, we're sending a raw vector for the query text. +In a real-world scenario you won't know the query text ahead of time. +You'll need to generate query vectors, on the fly, using the same embedding model that generated the document vectors. + +For this you'll need to deploy a text embedding model in {es} and use the <>. Alternatively, you can generate vectors client-side and send them directly with the search request. + +Learn how to <> for semantic search. + +[TIP] +==== +If you're just getting started with vector search in {es}, refer to <>. +==== diff --git a/docs/reference/search/search-your-data/semantic-search.asciidoc b/docs/reference/search/search-your-data/semantic-search.asciidoc index 0ef8591e42b5d..e0fb8415fee18 100644 --- a/docs/reference/search/search-your-data/semantic-search.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search.asciidoc @@ -8,6 +8,8 @@ Using an NLP model enables you to extract text embeddings out of text. Embeddings are vectors that provide a numeric representation of a text. Pieces of content with similar meaning have similar representations. 
+image::images/semantic-options.svg[Overview of semantic search workflows in {es}] + You have several options for using NLP models in the {stack}: * use the `semantic_text` workflow (recommended) @@ -109,3 +111,4 @@ include::semantic-search-inference.asciidoc[] include::semantic-search-elser.asciidoc[] include::cohere-es.asciidoc[] include::semantic-search-deploy-model.asciidoc[] +include::ingest-vectors.asciidoc[] From a270ee3f9c3e0dcfdd2874d8f64b9612098ddaf3 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 03:05:08 +1100 Subject: [PATCH 23/60] Mute org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT test {yaml=reference/esql/esql-across-clusters/line_197} #115575 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 8c90f73f475e6..ab5d686a041c1 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -273,6 +273,9 @@ tests: - class: org.elasticsearch.test.apmintegration.MetricsApmIT method: testApmIntegration issue: https://github.com/elastic/elasticsearch/issues/115415 +- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT + method: test {yaml=reference/esql/esql-across-clusters/line_197} + issue: https://github.com/elastic/elasticsearch/issues/115575 # Examples: # From c64226c3503b458c3285064d95528932d324177d Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 18:19:14 +0200 Subject: [PATCH 24/60] Don't return or accept `node_version` in the Desired Nodes API (#114580) It was deprecated in #104209 (8.13) and shouldn't be set or returned in 9.0 The Desired Nodes API is an internal API, and users shouldn't depend on its backward compatibility. --- .../upgrades/DesiredNodesUpgradeIT.java | 13 +-- rest-api-spec/build.gradle | 2 + .../test/cluster.desired_nodes/10_basic.yml | 95 ------------------- .../cluster/metadata/DesiredNode.java | 77 +-------------- .../metadata/DesiredNodeWithStatus.java | 5 +- .../cluster/RestUpdateDesiredNodesAction.java | 12 --- 6 files changed, 13 insertions(+), 191 deletions(-) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index e0d1e7aafa637..17618d5439d48 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -11,7 +11,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; -import org.elasticsearch.Build; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; @@ -82,8 +81,7 @@ private void assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent() throws Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), 1238.49922909, ByteSizeValue.ofGb(32), - ByteSizeValue.ofGb(128), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(128) ) ) .toList(); @@ -153,8 +151,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), processorsPrecision == ProcessorsPrecision.DOUBLE ? 
randomDoubleProcessorCount() : 0.5f, ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(randomIntBetween(128, 256)) ) ) .toList(); @@ -167,8 +164,7 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), new DesiredNode.ProcessorsRange(minProcessors, minProcessors + randomIntBetween(10, 20)), ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(randomIntBetween(128, 256)) ); }).toList(); } @@ -182,8 +178,7 @@ private void addClusterNodesToDesiredNodesWithIntegerProcessors(int version) thr Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), randomIntBetween(1, 24), ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)), - clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() + ByteSizeValue.ofGb(randomIntBetween(128, 256)) ) ) .toList(); diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 6cc2028bffa39..1a398f79085e7 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -61,4 +61,6 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") + task.skipTest("cluster.desired_nodes/10_basic/Test delete desired nodes with node_version generates a warning", "node_version warning is removed in 9.0") + task.skipTest("cluster.desired_nodes/10_basic/Test update desired nodes with node_version generates a warning", "node_version warning is removed in 9.0") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml index 1d1aa524ffb21..a45146a4e147a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml @@ -59,61 +59,6 @@ teardown: - contains: { nodes: { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb" } } - contains: { nodes: { settings: { node: { name: "instance-000188" } }, processors: 16.0, memory: "128gb", storage: "1tb" } } --- -"Test update desired nodes with node_version generates a warning": - - skip: - reason: "contains is a newly added assertion" - features: ["contains", "allowed_warnings"] - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - - do: - nodes.info: {} - - set: { nodes.$master.version: es_version } - - - do: - _internal.update_desired_nodes: - history_id: "test" - version: 1 - body: - nodes: - - { settings: { "node.name": "instance-000187" }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } - allowed_warnings: - - "[version removal] Specifying 
node_version in desired nodes requests is deprecated." - - match: { replaced_existing_history_id: false } - - - do: - _internal.get_desired_nodes: {} - - match: - $body: - history_id: "test" - version: 1 - nodes: - - { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } - - - do: - _internal.update_desired_nodes: - history_id: "test" - version: 2 - body: - nodes: - - { settings: { "node.name": "instance-000187" }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } - - { settings: { "node.name": "instance-000188" }, processors: 16.0, memory: "128gb", storage: "1tb", node_version: $es_version } - allowed_warnings: - - "[version removal] Specifying node_version in desired nodes requests is deprecated." - - match: { replaced_existing_history_id: false } - - - do: - _internal.get_desired_nodes: {} - - - match: { history_id: "test" } - - match: { version: 2 } - - length: { nodes: 2 } - - contains: { nodes: { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } } - - contains: { nodes: { settings: { node: { name: "instance-000188" } }, processors: 16.0, memory: "128gb", storage: "1tb", node_version: $es_version } } ---- "Test update move to a new history id": - skip: reason: "contains is a newly added assertion" @@ -199,46 +144,6 @@ teardown: _internal.get_desired_nodes: {} - match: { status: 404 } --- -"Test delete desired nodes with node_version generates a warning": - - skip: - features: allowed_warnings - - do: - cluster.state: {} - - - set: { master_node: master } - - - do: - nodes.info: {} - - set: { nodes.$master.version: es_version } - - - do: - _internal.update_desired_nodes: - history_id: "test" - version: 1 - body: - nodes: - - { settings: { "node.external_id": "instance-000187" }, processors: 8.0, memory: "64gb", storage: "128gb", node_version: $es_version } - allowed_warnings: - - "[version removal] Specifying node_version in desired nodes requests is deprecated." 
- - match: { replaced_existing_history_id: false } - - - do: - _internal.get_desired_nodes: {} - - match: - $body: - history_id: "test" - version: 1 - nodes: - - { settings: { node: { external_id: "instance-000187" } }, processors: 8.0, memory: "64gb", storage: "128gb", node_version: $es_version } - - - do: - _internal.delete_desired_nodes: {} - - - do: - catch: missing - _internal.get_desired_nodes: {} - - match: { status: 404 } ---- "Test update desired nodes is idempotent": - skip: reason: "contains is a newly added assertion" diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index fb8559b19d81d..fe72a59565cf6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -14,7 +14,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -22,7 +21,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.Processors; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -38,7 +36,6 @@ import java.util.Set; import java.util.TreeSet; import java.util.function.Predicate; -import java.util.regex.Pattern; import static java.lang.String.format; import static org.elasticsearch.node.Node.NODE_EXTERNAL_ID_SETTING; @@ -58,8 +55,6 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparabl private static final ParseField PROCESSORS_RANGE_FIELD = new ParseField("processors_range"); private static final ParseField MEMORY_FIELD = new ParseField("memory"); private static final ParseField STORAGE_FIELD = new ParseField("storage"); - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated field - private static final ParseField VERSION_FIELD = new ParseField("node_version"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "desired_node", @@ -69,8 +64,7 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparabl (Processors) args[1], (ProcessorsRange) args[2], (ByteSizeValue) args[3], - (ByteSizeValue) args[4], - (String) args[5] + (ByteSizeValue) args[4] ) ); @@ -104,12 +98,6 @@ static void configureParser(ConstructingObjectParser parser) { STORAGE_FIELD, ObjectParser.ValueType.STRING ); - parser.declareField( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> p.text(), - VERSION_FIELD, - ObjectParser.ValueType.STRING - ); } private final Settings settings; @@ -118,21 +106,9 @@ static void configureParser(ConstructingObjectParser parser) { private final ByteSizeValue memory; private final ByteSizeValue storage; - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated version field - private final String version; private final String externalId; private final Set roles; - @Deprecated - public DesiredNode(Settings settings, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage, String version) { - 
this(settings, null, processorsRange, memory, storage, version); - } - - @Deprecated - public DesiredNode(Settings settings, double processors, ByteSizeValue memory, ByteSizeValue storage, String version) { - this(settings, Processors.of(processors), null, memory, storage, version); - } - public DesiredNode(Settings settings, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage) { this(settings, null, processorsRange, memory, storage); } @@ -142,17 +118,6 @@ public DesiredNode(Settings settings, double processors, ByteSizeValue memory, B } DesiredNode(Settings settings, Processors processors, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage) { - this(settings, processors, processorsRange, memory, storage, null); - } - - DesiredNode( - Settings settings, - Processors processors, - ProcessorsRange processorsRange, - ByteSizeValue memory, - ByteSizeValue storage, - @Deprecated String version - ) { assert settings != null; assert memory != null; assert storage != null; @@ -186,7 +151,6 @@ public DesiredNode(Settings settings, double processors, ByteSizeValue memory, B this.processorsRange = processorsRange; this.memory = memory; this.storage = storage; - this.version = version; this.externalId = NODE_EXTERNAL_ID_SETTING.get(settings); this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(DiscoveryNode.getRolesFromSettings(settings))); } @@ -210,19 +174,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { } else { version = Version.readVersion(in).toString(); } - return new DesiredNode(settings, processors, processorsRange, memory, storage, version); - } - - private static final Pattern SEMANTIC_VERSION_PATTERN = Pattern.compile("^(\\d+\\.\\d+\\.\\d+)\\D?.*"); - - private static Version parseLegacyVersion(String version) { - if (version != null) { - var semanticVersionMatcher = SEMANTIC_VERSION_PATTERN.matcher(version); - if (semanticVersionMatcher.matches()) { - return Version.fromString(semanticVersionMatcher.group(1)); - } - } - return null; + return new DesiredNode(settings, processors, processorsRange, memory, storage); } @Override @@ -239,15 +191,9 @@ public void writeTo(StreamOutput out) throws IOException { memory.writeTo(out); storage.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { - out.writeOptionalString(version); + out.writeOptionalString(null); } else { - Version parsedVersion = parseLegacyVersion(version); - if (version == null) { - // Some node is from before we made the version field not required. If so, fill in with the current node version. 
- Version.writeVersion(Version.CURRENT, out); - } else { - Version.writeVersion(parsedVersion, out); - } + Version.writeVersion(Version.CURRENT, out); } } @@ -275,14 +221,6 @@ public void toInnerXContent(XContentBuilder builder, Params params) throws IOExc } builder.field(MEMORY_FIELD.getPreferredName(), memory); builder.field(STORAGE_FIELD.getPreferredName(), storage); - addDeprecatedVersionField(builder); - } - - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated field from response - private void addDeprecatedVersionField(XContentBuilder builder) throws IOException { - if (version != null) { - builder.field(VERSION_FIELD.getPreferredName(), version); - } } public boolean hasMasterRole() { @@ -366,7 +304,6 @@ private boolean equalsWithoutProcessorsSpecification(DesiredNode that) { return Objects.equals(settings, that.settings) && Objects.equals(memory, that.memory) && Objects.equals(storage, that.storage) - && Objects.equals(version, that.version) && Objects.equals(externalId, that.externalId) && Objects.equals(roles, that.roles); } @@ -379,7 +316,7 @@ public boolean equalsWithProcessorsCloseTo(DesiredNode that) { @Override public int hashCode() { - return Objects.hash(settings, processors, processorsRange, memory, storage, version, externalId, roles); + return Objects.hash(settings, processors, processorsRange, memory, storage, externalId, roles); } @Override @@ -408,10 +345,6 @@ public String toString() { + '}'; } - public boolean hasVersion() { - return Strings.isNullOrBlank(version) == false; - } - public record ProcessorsRange(Processors min, @Nullable Processors max) implements Writeable, ToXContentObject { private static final ParseField MIN_FIELD = new ParseField("min"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java index 7b89406be9aa0..606309adf205c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java @@ -44,13 +44,12 @@ public record DesiredNodeWithStatus(DesiredNode desiredNode, Status status) (Processors) args[1], (DesiredNode.ProcessorsRange) args[2], (ByteSizeValue) args[3], - (ByteSizeValue) args[4], - (String) args[5] + (ByteSizeValue) args[4] ), // An unknown status is expected during upgrades to versions >= STATUS_TRACKING_SUPPORT_VERSION // the desired node status would be populated when a node in the newer version is elected as // master, the desired nodes status update happens in NodeJoinExecutor. - args[6] == null ? Status.PENDING : (Status) args[6] + args[5] == null ? 
Status.PENDING : (Status) args[5] ) ); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java index ec8bb6285bdd4..b8e1fa0c836a3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java @@ -12,13 +12,11 @@ import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesAction; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.internal.node.NodeClient; -import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -67,16 +65,6 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli ); } - if (clusterSupportsFeature.test(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED)) { - if (updateDesiredNodesRequest.getNodes().stream().anyMatch(DesiredNode::hasVersion)) { - deprecationLogger.compatibleCritical("desired_nodes_version", VERSION_DEPRECATION_MESSAGE); - } - } else { - if (updateDesiredNodesRequest.getNodes().stream().anyMatch(n -> n.hasVersion() == false)) { - throw new XContentParseException("[node_version] field is required and must have a valid value"); - } - } - return restChannel -> client.execute( UpdateDesiredNodesAction.INSTANCE, updateDesiredNodesRequest, From ebec1a2fe2bc2b9fc40401074dbbb0dbcdc800bd Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Thu, 24 Oct 2024 18:25:38 +0200 Subject: [PATCH 25/60] Improve Logsdb docs including default values (#115205) This PR adds detailed documentation for `logsdb` mode, covering several key aspects of its default behavior and configuration options. It includes: - default settings for index sorting (`index.sort.field`, `index.sort.order`, etc.). - usage of synthetic `_source` by default. - information about specialized codecs and how users can override them. - default behavior for `ignore_malformed` and `ignore_above` settings, including precedence rules. - explanation of how fields without `doc_values` are handled and what we do if they are missing. --- docs/reference/data-streams/logs.asciidoc | 180 +++++++++++++++++++++- 1 file changed, 172 insertions(+), 8 deletions(-) diff --git a/docs/reference/data-streams/logs.asciidoc b/docs/reference/data-streams/logs.asciidoc index e870289bcf7be..6bb98684544a3 100644 --- a/docs/reference/data-streams/logs.asciidoc +++ b/docs/reference/data-streams/logs.asciidoc @@ -8,14 +8,6 @@ A logs data stream is a data stream type that stores log data more efficiently. In benchmarks, log data stored in a logs data stream used ~2.5 times less disk space than a regular data stream. The exact impact will vary depending on your data set. -The following features are enabled in a logs data stream: - -* <>, which omits storing the `_source` field. When the document source is requested, it is synthesized from document fields upon retrieval. - -* Index sorting. 
This yields a lower storage footprint. By default indices are sorted by `host.name` and `@timestamp` fields at index time.
-
-* More space efficient compression for fields with <> enabled.
-
 [discrete]
 [[how-to-use-logsds]]
 === Create a logs data stream
@@ -50,3 +42,175 @@ DELETE _index_template/my-index-template
 ----
 // TEST[continued]
 ////
+
+[[logsdb-default-settings]]
+
+[discrete]
+[[logsdb-synthetic-source]]
+=== Synthetic source
+
+By default, `logsdb` mode uses <>, which omits storing the original `_source`
+field and synthesizes it from doc values or stored fields upon document retrieval. Synthetic source comes with a few
+restrictions which you can read more about in the <> section dedicated to it.
+
+NOTE: When dealing with multi-value fields, the `index.mapping.synthetic_source_keep` setting controls how field values
+are preserved for <> reconstruction. In `logsdb`, the default value is `arrays`,
+which retains both duplicate values and the order of entries but not necessarily the exact structure when it comes to
+array elements or objects. Preserving duplicates and ordering could be critical for some log fields. This could be the
+case, for instance, for DNS A records, HTTP headers, or log entries that represent sequential or repeated events.
+
+For more details on this setting and ways to refine or bypass it, check out <>.
+
+[discrete]
+[[logsdb-sort-settings]]
+=== Index sort settings
+
+The following settings are applied by default when using the `logsdb` mode for index sorting:
+
+* `index.sort.field`: `["host.name", "@timestamp"]`
+  In `logsdb` mode, indices are sorted by `host.name` and `@timestamp` fields by default. For data streams, the
+  `@timestamp` field is automatically injected if it is not present.
+
+* `index.sort.order`: `["desc", "desc"]`
+  The default sort order for both fields is descending (`desc`), prioritizing the latest data.
+
+* `index.sort.mode`: `["min", "min"]`
+  The default sort mode is `min`, ensuring that indices are sorted by the minimum value of multi-value fields.
+
+* `index.sort.missing`: `["_first", "_first"]`
+  Missing values are sorted to appear first (`_first`) in `logsdb` index mode.
+
+`logsdb` index mode allows users to override the default sort settings. For instance, users can specify their own fields
+and order for sorting by modifying `index.sort.field` and `index.sort.order`.
+
+When using default sort settings, the `host.name` field is automatically injected into the mappings of the
+index as a `keyword` field to ensure that sorting can be applied. This guarantees that logs are efficiently sorted and
+retrieved based on the `host.name` and `@timestamp` fields.
+
+NOTE: If `subobjects` is set to `true` (which is the default), the `host.name` field will be mapped as an object field
+named `host`, containing a `name` child field of type `keyword`. On the other hand, if `subobjects` is set to `false`,
+a single `host.name` field will be mapped as a `keyword` field.
+
+Once an index is created, the sort settings are immutable and cannot be modified. To apply different sort settings,
+a new index must be created with the desired configuration. For data streams, this can be achieved by means of an index
+rollover after updating relevant (component) templates.
+
+If the default sort settings are not suitable for your use case, consider modifying them, as sketched below. Keep in
+mind that sort settings can influence indexing throughput, query latency, and may affect compression efficiency due to
+the way data
+is organized after sorting.
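
For illustration, here is a minimal sketch of overriding these defaults at index creation time; the
`my-custom-logs` index name and the `agent.id` sort field are hypothetical names invented for this example:

[source,console]
----
PUT my-custom-logs
{
  "settings": {
    "index.mode": "logsdb",
    "index.sort.field": [ "agent.id", "@timestamp" ],
    "index.sort.order": [ "asc", "desc" ]
  },
  "mappings": {
    "properties": {
      "agent.id": { "type": "keyword" }
    }
  }
}
----

Any field listed in `index.sort.field` must be mapped when the index is created, which is why `agent.id` is declared
explicitly in the mappings of this sketch.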
For more details, refer to our documentation on
+<>.
+
+NOTE: For <>, the `@timestamp` field is automatically injected if not already present.
+However, if custom sort settings are applied, the `@timestamp` field is injected into the mappings, but it is not
+automatically added to the list of sort fields.
+
+[discrete]
+[[logsdb-specialized-codecs]]
+=== Specialized codecs
+
+`logsdb` index mode uses the `best_compression` <> by default, which applies {wikipedia}/Zstd[ZSTD]
+compression to stored fields. Users are allowed to override it and switch to the `default` codec for faster compression
+at the expense of a slightly larger storage footprint.
+
+`logsdb` index mode also adopts specialized codecs for numeric doc values that are crafted to optimize storage usage.
+Users can rely on these specialized codecs being applied by default when using `logsdb` index mode.
+
+Doc values encoding for numeric fields in `logsdb` follows a static sequence of codecs, applying each one in the
+following order: delta encoding, offset encoding, Greatest Common Divisor (GCD) encoding, and finally Frame Of Reference
+(FOR) encoding. The decision to apply each encoding is based on heuristics determined by the data distribution.
+For example, before applying delta encoding, the algorithm checks if the data is monotonically non-decreasing or
+non-increasing. If the data fits this pattern, delta encoding is applied; otherwise, the next encoding is considered.
+
+The encoding is specific to each Lucene segment and is also re-applied at segment merging time. The merged Lucene segment
+may use a different encoding compared to the original Lucene segments, based on the characteristics of the merged data.
+
+The following methods are applied sequentially:
+
+* **Delta encoding**:
+  a compression method that stores the difference between consecutive values instead of the actual values.
+
+* **Offset encoding**:
+  a compression method that stores the difference from a base value rather than between consecutive values.
+
+* **Greatest Common Divisor (GCD) encoding**:
+  a compression method that finds the greatest common divisor of a set of values and stores the differences
+  as multiples of the GCD.
+
+* **Frame Of Reference (FOR) encoding**:
+  a compression method that determines the smallest number of bits required to encode a block of values and uses
+  bit-packing to fit such values into larger 64-bit blocks.
+
+For keyword fields, **Run Length Encoding (RLE)** is applied to the ordinals, which represent positions in the Lucene
+segment-level keyword dictionary. This compression is used when multiple consecutive documents share the same keyword.
+
+[discrete]
+[[logsdb-ignored-settings]]
+=== `ignore_malformed`, `ignore_above`, `ignore_dynamic_beyond_limit`
+
+By default, `logsdb` index mode sets `ignore_malformed` to `true`. This setting allows documents with malformed fields
+to be indexed without causing indexing failures, ensuring that log data ingestion continues smoothly even when some
+fields contain invalid or improperly formatted data.
+
+Users can override this by setting `index.mapping.ignore_malformed` to `false`. However, this is not recommended
+as it might result in documents with malformed fields being rejected and not indexed at all.
+
+In `logsdb` index mode, the `index.mapping.ignore_above` setting is applied by default at the index level to ensure
+efficient storage and indexing of large keyword fields. The index-level default for `ignore_above` is set to 8191
+**characters**; since a character may occupy up to 4 bytes in UTF-8, this corresponds to a limit of at most 32764 bytes.
+The mapping-level `ignore_above` setting still takes precedence. If a specific field has an `ignore_above` value
+defined in its mapping, that value will override the index-level `index.mapping.ignore_above` value. This default
+behavior helps to optimize indexing performance by preventing excessively large string values from being indexed, while
+still allowing users to customize the limit, overriding it at the mapping level or changing the index level default
+setting.
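
A minimal sketch of this precedence, assuming a hypothetical `my-logsdb-traces` index where an invented `trace.id`
field needs a tighter cap than the index-level default:

[source,console]
----
PUT my-logsdb-traces
{
  "settings": {
    "index.mode": "logsdb",
    "index.mapping.ignore_above": 1024
  },
  "mappings": {
    "properties": {
      "trace.id": { "type": "keyword", "ignore_above": 256 }
    }
  }
}
----

In this sketch, keyword fields in general would ignore values longer than 1024 characters, while `trace.id` would apply
its own 256-character limit, since the mapping-level value takes precedence over the index-level default.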
+In `logsdb` index mode, the setting `index.mapping.total_fields.ignore_dynamic_beyond_limit` is set to `true` by
+default. This allows dynamically mapped fields to be added on top of statically defined fields without causing document
+rejection, even after the total number of fields exceeds the limit defined by `index.mapping.total_fields.limit`. The
+`index.mapping.total_fields.limit` setting specifies the maximum number of fields an index can have (static, dynamic
+and runtime). When the limit is reached, new dynamically mapped fields will be ignored instead of failing the document
+indexing, ensuring continued log ingestion without errors.
+
+NOTE: When automatically injected, `host.name` and `@timestamp` contribute to the limit of mapped fields. When
+`host.name` is mapped with `subobjects: true` it consists of two fields. When `host.name` is mapped with
+`subobjects: false` it only consists of one field.
+
+[discrete]
+[[logsdb-nodocvalue-fields]]
+=== Fields without doc values
+
+When `logsdb` index mode uses synthetic `_source`, and `doc_values` are disabled for a field in the mapping,
+Elasticsearch may set the `store` setting to `true` for that field as a last resort option to ensure that the field's
+data is still available for reconstructing the document’s source when retrieving it via
+<>.
+
+For example, this happens with text fields when `store` is `false` and there is no suitable multi-field available to
+reconstruct the original value in <>.
+
+This automatic adjustment allows synthetic source to work correctly, even when doc values are not enabled for certain
+fields.
+
+[discrete]
+[[logsdb-settings-summary]]
+=== LogsDB settings summary
+
+The following is a summary of key settings that apply when using `logsdb` index mode in Elasticsearch:
+
+* **`index.mode`**: `"logsdb"`
+
+* **`index.mapping.synthetic_source_keep`**: `"arrays"`
+
+* **`index.sort.field`**: `["host.name", "@timestamp"]`
+
+* **`index.sort.order`**: `["desc", "desc"]`
+
+* **`index.sort.mode`**: `["min", "min"]`
+
+* **`index.sort.missing`**: `["_first", "_first"]`
+
+* **`index.codec`**: `"best_compression"`
+
+* **`index.mapping.ignore_malformed`**: `true`
+
+* **`index.mapping.ignore_above`**: `8191`
+
+* **`index.mapping.total_fields.ignore_dynamic_beyond_limit`**: `true`
From 160faa2dfc8c590dcb398487b79eb51eb84f8f44 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Thu, 24 Oct 2024 09:29:34 -0700
Subject: [PATCH 26/60] Re-enable threadpool blocking in Kibana system index
 test (#112569)

KibanaThreadPoolIT checks the Kibana system user can write (using the
system read/write threadpools) even when the normal read/write
threadpools are blocked. This commit re-enables a key part of the test
which was disabled.
closes #107625 --- .../kibana/KibanaThreadPoolIT.java | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java index 61bd31fea3455..553e4696af316 100644 --- a/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java +++ b/modules/kibana/src/internalClusterTest/java/org/elasticsearch/kibana/KibanaThreadPoolIT.java @@ -12,6 +12,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; @@ -37,6 +39,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.startsWith; /** @@ -150,15 +153,15 @@ private void assertThreadPoolsBlocked() { new Thread(() -> expectThrows(EsRejectedExecutionException.class, () -> getFuture.actionGet(SAFE_AWAIT_TIMEOUT))).start(); // intentionally commented out this test until https://github.com/elastic/elasticsearch/issues/97916 is fixed - // var e3 = expectThrows( - // SearchPhaseExecutionException.class, - // () -> client().prepareSearch(USER_INDEX) - // .setQuery(QueryBuilders.matchAllQuery()) - // // Request times out if max concurrent shard requests is set to 1 - // .setMaxConcurrentShardRequests(usually() ? SearchRequest.DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS : randomIntBetween(2, 10)) - // .get() - // ); - // assertThat(e3.getMessage(), containsString("all shards failed")); + var e3 = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch(USER_INDEX) + .setQuery(QueryBuilders.matchAllQuery()) + // Request times out if max concurrent shard requests is set to 1 + .setMaxConcurrentShardRequests(usually() ? 
SearchRequest.DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS : randomIntBetween(2, 10)) + .get() + ); + assertThat(e3.getMessage(), containsString("all shards failed")); } protected void runWithBlockedThreadPools(Runnable runnable) throws Exception { From 5c1a3ada8ae7a790dfd8460c76c6a341d9d42b7a Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 24 Oct 2024 19:37:02 +0300 Subject: [PATCH 27/60] Propagate root subobjects setting to downsample indexes (#115358) * Propagate root subobjects setting to downsample indexes * exclude tests from rest compat * remove subobjects propagation --- x-pack/plugin/downsample/qa/rest/build.gradle | 14 + .../downsample/DownsampleWithBasicRestIT.java | 40 ++ .../test/downsample/10_basic.yml | 466 +++++++++--------- 3 files changed, 292 insertions(+), 228 deletions(-) create mode 100644 x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java diff --git a/x-pack/plugin/downsample/qa/rest/build.gradle b/x-pack/plugin/downsample/qa/rest/build.gradle index ba5ac7b0c7317..5142632a36006 100644 --- a/x-pack/plugin/downsample/qa/rest/build.gradle +++ b/x-pack/plugin/downsample/qa/rest/build.gradle @@ -32,6 +32,20 @@ tasks.named('yamlRestTest') { tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } +tasks.named("yamlRestCompatTestTransform").configure ({ task -> + task.skipTest("downsample/10_basic/Downsample index with empty dimension on routing path", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample histogram as label", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample date timestamp field using strict_date_optional_time_nanos format", + "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample a downsampled index", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample date_nanos timestamp field using custom format", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample using coarse grained timestamp", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample label with ignore_above", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample object field", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample empty and missing labels", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample index", "Skip until pr/115358 gets backported") + task.skipTest("downsample/10_basic/Downsample index with empty dimension", "Skip until pr/115358 gets backported") +}) if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java new file mode 100644 index 0000000000000..8f75e76315844 --- /dev/null +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/downsample/DownsampleWithBasicRestIT.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.downsample; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; + +public class DownsampleWithBasicRestIT extends ESClientYamlSuiteTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.security.enabled", "false") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public DownsampleWithBasicRestIT(final ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + +} diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml index 0bcd35cc69038..fa3560bec516e 100644 --- a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml @@ -16,6 +16,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -106,6 +107,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -172,6 +174,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -237,6 +240,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -318,29 +322,29 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - match: { hits.hits.0._source.k8s.pod.multi-counter: 0 } - - match: { hits.hits.0._source.k8s.pod.scaled-counter: 0.00 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.min: 100 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.max: 102 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.sum: 607 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.value_count: 6 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.min: 100.0 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.max: 101.0 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.sum: 201.0 } - - match: { hits.hits.0._source.k8s.pod.scaled-gauge.value_count: 2 } - - match: { hits.hits.0._source.k8s.pod.network.tx.min: 1434521831 } - - match: { hits.hits.0._source.k8s.pod.network.tx.max: 1434577921 } - - match: { hits.hits.0._source.k8s.pod.network.tx.value_count: 2 } - - 
match: { hits.hits.0._source.k8s.pod.ip: "10.10.55.56" } - - match: { hits.hits.0._source.k8s.pod.created_at: "2021-04-28T19:43:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.number_of_containers: 1 } - - match: { hits.hits.0._source.k8s.pod.tags: ["backend", "test", "us-west2"] } - - match: { hits.hits.0._source.k8s.pod.values: [1, 1, 2] } - - is_false: hits.hits.0._source.k8s.pod.running + - match: { hits.hits.0._source.k8s\.pod\.multi-counter: 0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-counter: 0.00 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.min: 100 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.max: 102 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.sum: 607 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.value_count: 6 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.min: 100.0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.max: 101.0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.sum: 201.0 } + - match: { hits.hits.0._source.k8s\.pod\.scaled-gauge.value_count: 2 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.min: 1434521831 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.max: 1434577921 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.value_count: 2 } + - match: { hits.hits.0._source.k8s\.pod\.ip: "10.10.55.56" } + - match: { hits.hits.0._source.k8s\.pod\.created_at: "2021-04-28T19:43:00.000Z" } + - match: { hits.hits.0._source.k8s\.pod\.number_of_containers: 1 } + - match: { hits.hits.0._source.k8s\.pod\.tags: ["backend", "test", "us-west2"] } + - match: { hits.hits.0._source.k8s\.pod\.values: [1, 1, 2] } + - is_false: hits.hits.0._source.k8s\.pod\.running # Assert rollup index settings - do: @@ -362,21 +366,21 @@ setup: - match: { test-downsample.mappings.properties.@timestamp.type: date } - match: { test-downsample.mappings.properties.@timestamp.meta.fixed_interval: 1h } - match: { test-downsample.mappings.properties.@timestamp.meta.time_zone: UTC } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.type: aggregate_metric_double } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.default_metric: max } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.time_series_metric: gauge } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-counter.type: long } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-counter.time_series_metric: counter } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.type: scaled_float } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.scaling_factor: 100 } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.time_series_metric: counter } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.type: aggregate_metric_double } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.default_metric: max } - - match: { 
test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.time_series_metric: gauge } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.type: aggregate_metric_double } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.default_metric: max } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-gauge.time_series_metric: gauge } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-counter.type: long } + - match: { test-downsample.mappings.properties.k8s\.pod\.multi-counter.time_series_metric: counter } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-counter.type: scaled_float } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-counter.scaling_factor: 100 } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-counter.time_series_metric: counter } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.type: aggregate_metric_double } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.default_metric: max } + - match: { test-downsample.mappings.properties.k8s\.pod\.scaled-gauge.time_series_metric: gauge } + - match: { test-downsample.mappings.properties.k8s\.pod\.uid.type: keyword } + - match: { test-downsample.mappings.properties.k8s\.pod\.uid.time_series_dimension: true } # Assert source index has not been deleted @@ -763,18 +767,18 @@ setup: - match: { test-downsample-2.mappings.properties.@timestamp.type: date } - match: { test-downsample-2.mappings.properties.@timestamp.meta.fixed_interval: 2h } - match: { test-downsample-2.mappings.properties.@timestamp.meta.time_zone: UTC } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.type: aggregate_metric_double } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.default_metric: max } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-gauge.time_series_metric: gauge } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-counter.type: long } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.multi-counter.time_series_metric: counter } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.type: aggregate_metric_double } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.default_metric: max } - - match: { 
test-downsample-2.mappings.properties.k8s.properties.pod.properties.network.properties.tx.time_series_metric: gauge } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.type: aggregate_metric_double } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.default_metric: max } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-gauge.time_series_metric: gauge } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-counter.type: long } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.multi-counter.time_series_metric: counter } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.uid.type: keyword } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.uid.time_series_dimension: true } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.type: aggregate_metric_double } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.default_metric: max } + - match: { test-downsample-2.mappings.properties.k8s\.pod\.network\.tx.time_series_metric: gauge } - do: search: @@ -784,29 +788,29 @@ setup: - length: { hits.hits: 3 } - match: { hits.hits.0._source._doc_count: 4 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - match: { hits.hits.0._source.k8s.pod.multi-counter: 76 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.min: 95.0 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.max: 110.0 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.sum: 1209.0 } - - match: { hits.hits.0._source.k8s.pod.multi-gauge.value_count: 12 } - - match: { hits.hits.0._source.k8s.pod.network.tx.min: 1434521831 } - - match: { hits.hits.0._source.k8s.pod.network.tx.max: 1434595272 } - - match: { hits.hits.0._source.k8s.pod.network.tx.value_count: 4 } - - match: { hits.hits.0._source.k8s.pod.ip: "10.10.55.120" } - - match: { hits.hits.0._source.k8s.pod.created_at: "2021-04-28T19:45:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.number_of_containers: 1 } - - match: { hits.hits.0._source.k8s.pod.tags: [ "backend", "test", "us-west1" ] } - - match: { hits.hits.0._source.k8s.pod.values: [ 1, 2, 3 ] } - - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.multi-counter: 76 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.min: 95.0 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.max: 110.0 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.sum: 1209.0 } + - match: { hits.hits.0._source.k8s\.pod\.multi-gauge.value_count: 12 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.min: 1434521831 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.max: 1434595272 } + - match: { hits.hits.0._source.k8s\.pod\.network\.tx.value_count: 4 } + - match: { hits.hits.0._source.k8s\.pod\.ip: "10.10.55.120" } + - match: { hits.hits.0._source.k8s\.pod\.created_at: "2021-04-28T19:45:00.000Z" } + - match: { hits.hits.0._source.k8s\.pod\.number_of_containers: 1 } + - match: { hits.hits.0._source.k8s\.pod\.tags: [ 
"backend", "test", "us-west1" ] } + - match: { hits.hits.0._source.k8s\.pod\.values: [ 1, 2, 3 ] } + + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.1._source._doc_count: 2 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: 2021-04-28T20:00:00.000Z } - match: { hits.hits.2._source._doc_count: 2 } @@ -890,16 +894,16 @@ setup: - match: { test-downsample-histogram.mappings.properties.@timestamp.type: date } - match: { test-downsample-histogram.mappings.properties.@timestamp.meta.fixed_interval: 1h } - match: { test-downsample-histogram.mappings.properties.@timestamp.meta.time_zone: UTC } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.latency.type: histogram } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.latency.time_series_metric: null } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.empty-histogram.type: histogram } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.empty-histogram.time_series_metric: null } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.type: aggregate_metric_double } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.metrics: [ "min", "max", "sum", "value_count" ] } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.default_metric: max } - - match: { test-downsample-histogram.mappings.properties.k8s.properties.pod.properties.network.properties.tx.time_series_metric: gauge } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.latency.type: histogram } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.latency.time_series_metric: null } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.empty-histogram.type: histogram } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.empty-histogram.time_series_metric: null } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.uid.type: keyword } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.uid.time_series_dimension: true } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.type: aggregate_metric_double } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.default_metric: max } + - match: { test-downsample-histogram.mappings.properties.k8s\.pod\.network\.tx.time_series_metric: gauge } - do: search: @@ -910,64 +914,64 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - 
match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - - length: { hits.hits.0._source.k8s.pod.latency.counts: 4 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.0: 2 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.1: 2 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.2: 8 } - - match: { hits.hits.0._source.k8s.pod.latency.counts.3: 8 } - - length: { hits.hits.0._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.0._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.0._source.k8s.pod.latency.values.1: 10.0 } - - match: { hits.hits.0._source.k8s.pod.latency.values.2: 100.0 } - - match: { hits.hits.0._source.k8s.pod.latency.values.3: 1000.0 } + - length: { hits.hits.0._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.0: 2 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.1: 2 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.2: 8 } + - match: { hits.hits.0._source.k8s\.pod\.latency.counts.3: 8 } + - length: { hits.hits.0._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.1: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.2: 100.0 } + - match: { hits.hits.0._source.k8s\.pod\.latency.values.3: 1000.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.1._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2021-04-28T19:00:00.000Z } - - length: { hits.hits.1._source.k8s.pod.latency.counts: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.0: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.1: 5 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.2: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.counts.3: 13 } - - length: { hits.hits.1._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.1._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.1._source.k8s.pod.latency.values.1: 10.0 } - - match: { hits.hits.1._source.k8s.pod.latency.values.2: 100.0 } - - match: { hits.hits.1._source.k8s.pod.latency.values.3: 1000.0 } + - length: { hits.hits.1._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.0: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.1: 5 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.2: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.counts.3: 13 } + - length: { hits.hits.1._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.1: 10.0 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.2: 100.0 } + - match: { hits.hits.1._source.k8s\.pod\.latency.values.3: 1000.0 } - match: { hits.hits.2._source._doc_count: 2 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } - - length: { hits.hits.2._source.k8s.pod.latency.counts: 4 } - - 
match: { hits.hits.2._source.k8s.pod.latency.counts.0: 8 } - - match: { hits.hits.2._source.k8s.pod.latency.counts.1: 7 } - - match: { hits.hits.2._source.k8s.pod.latency.counts.2: 10 } - - match: { hits.hits.2._source.k8s.pod.latency.counts.3: 12 } - - length: { hits.hits.2._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.2._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.2._source.k8s.pod.latency.values.1: 2.0 } - - match: { hits.hits.2._source.k8s.pod.latency.values.2: 5.0 } - - match: { hits.hits.2._source.k8s.pod.latency.values.3: 10.0 } + - length: { hits.hits.2._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.0: 8 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.1: 7 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.2: 10 } + - match: { hits.hits.2._source.k8s\.pod\.latency.counts.3: 12 } + - length: { hits.hits.2._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.1: 2.0 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.2: 5.0 } + - match: { hits.hits.2._source.k8s\.pod\.latency.values.3: 10.0 } - match: { hits.hits.3._source._doc_count: 2 } - - match: { hits.hits.3._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.3._source.metricset: pod } - match: { hits.hits.3._source.@timestamp: 2021-04-28T19:00:00.000Z } - - length: { hits.hits.3._source.k8s.pod.latency.counts: 4 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.0: 7 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.1: 15 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.2: 10 } - - match: { hits.hits.3._source.k8s.pod.latency.counts.3: 10 } - - length: { hits.hits.3._source.k8s.pod.latency.values: 4 } - - match: { hits.hits.3._source.k8s.pod.latency.values.0: 1.0 } - - match: { hits.hits.3._source.k8s.pod.latency.values.1: 2.0 } - - match: { hits.hits.3._source.k8s.pod.latency.values.2: 5.0 } - - match: { hits.hits.3._source.k8s.pod.latency.values.3: 10.0 } + - length: { hits.hits.3._source.k8s\.pod\.latency.counts: 4 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.0: 7 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.1: 15 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.2: 10 } + - match: { hits.hits.3._source.k8s\.pod\.latency.counts.3: 10 } + - length: { hits.hits.3._source.k8s\.pod\.latency.values: 4 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.0: 1.0 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.1: 2.0 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.2: 5.0 } + - match: { hits.hits.3._source.k8s\.pod\.latency.values.3: 10.0 } --- "Downsample date_nanos timestamp field using custom format": @@ -988,6 +992,7 @@ setup: start_time: 2023-02-23T00:00:00Z end_time: 2023-02-24T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date_nanos @@ -1048,19 +1053,19 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2023-02-23T12:00:00.000000000Z } - - match: { hits.hits.0._source.k8s.pod.value.min: 8.0 
} - - match: { hits.hits.0._source.k8s.pod.value.max: 12.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 30.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 8.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 12.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 30.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2023-02-23T13:00:00.000000000Z } - - match: { hits.hits.1._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 9.0 } - do: indices.get_mapping: @@ -1090,6 +1095,7 @@ setup: start_time: 2023-02-23T00:00:00Z end_time: 2023-02-24T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1150,19 +1156,19 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2023-02-23T12:00:00.000Z } - - match: { hits.hits.0._source.k8s.pod.value.min: 8.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 12.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 30.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 8.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 12.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 30.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2023-02-23T13:00:00.000Z } - - match: { hits.hits.1._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 9.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 9.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 9.0 } - do: indices.get_mapping: @@ -1192,6 +1198,7 @@ setup: start_time: 2023-02-23T00:00:00Z end_time: 2023-02-27T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1251,33 +1258,33 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 1 } - - match: { hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.0._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2023-02-23 } - - match: { hits.hits.0._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 10.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 
10.0 } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: 2023-02-24 } - - match: { hits.hits.1._source.k8s.pod.value.min: 12.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 12.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 12.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 12.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 12.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 12.0 } - match: { hits.hits.2._source._doc_count: 1 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: 2023-02-25 } - - match: { hits.hits.2._source.k8s.pod.value.min: 8.0 } - - match: { hits.hits.2._source.k8s.pod.value.max: 8.0 } - - match: { hits.hits.2._source.k8s.pod.value.sum: 8.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.min: 8.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.max: 8.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.sum: 8.0 } - match: { hits.hits.3._source._doc_count: 1 } - - match: { hits.hits.3._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.3._source.metricset: pod } - match: { hits.hits.3._source.@timestamp: 2023-02-26 } - - match: { hits.hits.3._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.3._source.k8s.pod.value.max: 9.0 } - - match: { hits.hits.3._source.k8s.pod.value.sum: 9.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.max: 9.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.sum: 9.0 } --- "Downsample object field": @@ -1304,48 +1311,48 @@ setup: - length: { hits.hits: 4 } - match: { hits.hits.0._source._doc_count: 2 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.name: "dog" } - - match: { hits.hits.0._source.k8s.pod.value.min: 9.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 16.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 25.0 } - - match: { hits.hits.0._source.k8s.pod.agent.id: "second" } - - match: { hits.hits.0._source.k8s.pod.agent.version: "2.1.7" } + - match: { hits.hits.0._source.k8s\.pod\.name: "dog" } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 9.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 16.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 25.0 } + - match: { hits.hits.0._source.k8s\.pod\.agent\.id: "second" } + - match: { hits.hits.0._source.k8s\.pod\.agent\.version: "2.1.7" } - match: { hits.hits.1._source._doc_count: 2 } - - match: { hits.hits.1._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.1._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: "2021-04-28T19:00:00.000Z" } - - match: 
{ hits.hits.1._source.k8s.pod.name: "dog" } - - match: { hits.hits.1._source.k8s.pod.value.min: 17.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 25.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 42.0 } - - match: { hits.hits.1._source.k8s.pod.agent.id: "second" } - - match: { hits.hits.1._source.k8s.pod.agent.version: "2.1.7" } + - match: { hits.hits.1._source.k8s\.pod\.name: "dog" } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 17.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 25.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 42.0 } + - match: { hits.hits.1._source.k8s\.pod\.agent\.id: "second" } + - match: { hits.hits.1._source.k8s\.pod\.agent\.version: "2.1.7" } - match: { hits.hits.2._source._doc_count: 2 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.2._source.k8s.pod.name: "cat" } - - match: { hits.hits.2._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.2._source.k8s.pod.value.max: 20.0 } - - match: { hits.hits.2._source.k8s.pod.value.sum: 30.0 } - - match: { hits.hits.2._source.k8s.pod.agent.id: "first" } - - match: { hits.hits.2._source.k8s.pod.agent.version: "2.0.4" } + - match: { hits.hits.2._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.2._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.max: 20.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.sum: 30.0 } + - match: { hits.hits.2._source.k8s\.pod\.agent\.id: "first" } + - match: { hits.hits.2._source.k8s\.pod\.agent\.version: "2.0.4" } - match: { hits.hits.3._source._doc_count: 2 } - - match: { hits.hits.3._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.3._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.3._source.metricset: pod } - match: { hits.hits.3._source.@timestamp: "2021-04-28T20:00:00.000Z" } - - match: { hits.hits.3._source.k8s.pod.name: "cat" } - - match: { hits.hits.3._source.k8s.pod.value.min: 12.0 } - - match: { hits.hits.3._source.k8s.pod.value.max: 15.0 } - - match: { hits.hits.3._source.k8s.pod.value.sum: 27.0 } - - match: { hits.hits.3._source.k8s.pod.agent.id: "first" } - - match: { hits.hits.3._source.k8s.pod.agent.version: "2.0.4" } + - match: { hits.hits.3._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.3._source.k8s\.pod\.value.min: 12.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.max: 15.0 } + - match: { hits.hits.3._source.k8s\.pod\.value.sum: 27.0 } + - match: { hits.hits.3._source.k8s\.pod\.agent\.id: "first" } + - match: { hits.hits.3._source.k8s\.pod\.agent\.version: "2.0.4" } --- "Downsample empty and missing labels": @@ -1372,40 +1379,40 @@ setup: - length: { hits.hits: 3 } - match: { hits.hits.2._source._doc_count: 4 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } - match: { hits.hits.2._source.metricset: pod } - match: { hits.hits.2._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.2._source.k8s.pod.name: "cat" } - - match: { hits.hits.2._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.2._source.k8s.pod.value.max: 40.0 } - - match: { hits.hits.2._source.k8s.pod.value.sum: 100.0 } - - match: { 
hits.hits.2._source.k8s.pod.value.value_count: 4 } - - match: { hits.hits.2._source.k8s.pod.label: "abc" } - - match: { hits.hits.2._source.k8s.pod.unmapped: "abc" } + - match: { hits.hits.2._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.2._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.max: 40.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.sum: 100.0 } + - match: { hits.hits.2._source.k8s\.pod\.value.value_count: 4 } + - match: { hits.hits.2._source.k8s\.pod\.label: "abc" } + - match: { hits.hits.2._source.k8s\.pod\.unmapped: "abc" } - match: { hits.hits.1._source._doc_count: 4 } - - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e9597ab } + - match: { hits.hits.1._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e9597ab } - match: { hits.hits.1._source.metricset: pod } - match: { hits.hits.1._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.1._source.k8s.pod.name: "cat" } - - match: { hits.hits.1._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.1._source.k8s.pod.value.max: 40.0 } - - match: { hits.hits.1._source.k8s.pod.value.sum: 100.0 } - - match: { hits.hits.1._source.k8s.pod.value.value_count: 4 } - - match: { hits.hits.1._source.k8s.pod.label: null } - - match: { hits.hits.1._source.k8s.pod.unmapped: null } + - match: { hits.hits.1._source.k8s\.pod\.name: "cat" } + - match: { hits.hits.1._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.max: 40.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.sum: 100.0 } + - match: { hits.hits.1._source.k8s\.pod\.value.value_count: 4 } + - match: { hits.hits.1._source.k8s\.pod\.label: null } + - match: { hits.hits.1._source.k8s\.pod\.unmapped: null } - match: { hits.hits.0._source._doc_count: 4 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: "2021-04-28T18:00:00.000Z" } - - match: { hits.hits.0._source.k8s.pod.name: "dog" } - - match: { hits.hits.0._source.k8s.pod.value.min: 10.0 } - - match: { hits.hits.0._source.k8s.pod.value.max: 40.0 } - - match: { hits.hits.0._source.k8s.pod.value.sum: 100.0 } - - match: { hits.hits.0._source.k8s.pod.value.value_count: 4 } - - match: { hits.hits.0._source.k8s.pod.label: "xyz" } - - match: { hits.hits.0._source.k8s.pod.unmapped: "xyz" } + - match: { hits.hits.0._source.k8s\.pod\.name: "dog" } + - match: { hits.hits.0._source.k8s\.pod\.value.min: 10.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.max: 40.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.sum: 100.0 } + - match: { hits.hits.0._source.k8s\.pod\.value.value_count: 4 } + - match: { hits.hits.0._source.k8s\.pod\.label: "xyz" } + - match: { hits.hits.0._source.k8s\.pod\.unmapped: "xyz" } --- @@ -1427,6 +1434,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1495,45 +1503,45 @@ setup: - match: { hits.hits.0._source._doc_count: 2 } - match: { hits.hits.0._source.metricset: pod } - - match: { hits.hits.0._source.k8s.pod.name: dog } - - match: { hits.hits.0._source.k8s.pod.value: 20 } - - match: { hits.hits.0._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } - - match: { hits.hits.0._source.k8s.pod.label: foo } + - match: { hits.hits.0._source.k8s\.pod\.name: dog } + - match: { 
hits.hits.0._source.k8s\.pod\.value: 20 } + - match: { hits.hits.0._source.k8s\.pod\.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.0._source.k8s\.pod\.label: foo } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.1._source._doc_count: 2 } - match: { hits.hits.1._source.metricset: pod } - - match: { hits.hits.1._source.k8s.pod.name: fox } - - match: { hits.hits.1._source.k8s.pod.value: 20 } - - match: { hits.hits.1._source.k8s.pod.uid: 7393ef8e-489c-11ee-be56-0242ac120002 } - - match: { hits.hits.1._source.k8s.pod.label: bar } + - match: { hits.hits.1._source.k8s\.pod\.name: fox } + - match: { hits.hits.1._source.k8s\.pod\.value: 20 } + - match: { hits.hits.1._source.k8s\.pod\.uid: 7393ef8e-489c-11ee-be56-0242ac120002 } + - match: { hits.hits.1._source.k8s\.pod\.label: bar } - match: { hits.hits.1._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.2._source._doc_count: 2 } - match: { hits.hits.2._source.metricset: pod } - - match: { hits.hits.2._source.k8s.pod.name: cat } - - match: { hits.hits.2._source.k8s.pod.value: 20 } - - match: { hits.hits.2._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + - match: { hits.hits.2._source.k8s\.pod\.name: cat } + - match: { hits.hits.2._source.k8s\.pod\.value: 20 } + - match: { hits.hits.2._source.k8s\.pod\.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } # NOTE: when downsampling a label field we propagate the last (most-recent timestamp-wise) non-null value, # ignoring/skipping null values. Here the last document has a value that hits ignore_above ("foofoo") and, # as a result, we propagate the value of the previous document ("foo") - - match: { hits.hits.2._source.k8s.pod.label: foo } + - match: { hits.hits.2._source.k8s\.pod\.label: foo } - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.3._source._doc_count: 2 } - match: { hits.hits.3._source.metricset: pod } - - match: { hits.hits.3._source.k8s.pod.name: cow } - - match: { hits.hits.3._source.k8s.pod.value: 20 } - - match: { hits.hits.3._source.k8s.pod.uid: a81ef23a-489c-11ee-be56-0242ac120005 } - - match: { hits.hits.3._source.k8s.pod.label: null } + - match: { hits.hits.3._source.k8s\.pod\.name: cow } + - match: { hits.hits.3._source.k8s\.pod\.value: 20 } + - match: { hits.hits.3._source.k8s\.pod\.uid: a81ef23a-489c-11ee-be56-0242ac120005 } + - match: { hits.hits.3._source.k8s\.pod\.label: null } - match: { hits.hits.3._source.@timestamp: 2021-04-28T18:00:00.000Z } - do: indices.get_mapping: index: test-downsample-label-ignore-above - - match: { test-downsample-label-ignore-above.mappings.properties.k8s.properties.pod.properties.label.type: keyword } - - match: { test-downsample-label-ignore-above.mappings.properties.k8s.properties.pod.properties.label.ignore_above: 3 } + - match: { test-downsample-label-ignore-above.mappings.properties.k8s\.pod\.label.type: keyword } + - match: { test-downsample-label-ignore-above.mappings.properties.k8s\.pod\.label.ignore_above: 3 } --- "Downsample index with empty dimension": @@ -1555,6 +1563,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1612,11 +1621,11 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.name: cat } - - match: { hits.hits.0._source.k8s.pod.empty: null } + - match: { hits.hits.0._source.k8s\.pod\.name: cat } + - match: { 
hits.hits.0._source.k8s\.pod\.empty: null } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.name: cat } - - match: { hits.hits.1._source.k8s.pod.empty: "" } + - match: { hits.hits.1._source.k8s\.pod\.name: cat } + - match: { hits.hits.1._source.k8s\.pod\.empty: "" } --- "Downsample index with empty dimension on routing path": @@ -1638,6 +1647,7 @@ setup: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z mappings: + subobjects: false properties: "@timestamp": type: date @@ -1695,8 +1705,8 @@ setup: - length: { hits.hits: 2 } - match: { hits.hits.0._source._doc_count: 3 } - - match: { hits.hits.0._source.k8s.pod.name: cat } - - match: { hits.hits.0._source.k8s.pod.empty: null } + - match: { hits.hits.0._source.k8s\.pod\.name: cat } + - match: { hits.hits.0._source.k8s\.pod\.empty: null } - match: { hits.hits.1._source._doc_count: 1 } - - match: { hits.hits.1._source.k8s.pod.name: cat } - - match: { hits.hits.1._source.k8s.pod.empty: "" } + - match: { hits.hits.1._source.k8s\.pod\.name: cat } + - match: { hits.hits.1._source.k8s\.pod\.empty: "" } From 97ed0a93bb75d0f920c976527f4f5fc0b6065beb Mon Sep 17 00:00:00 2001 From: shainaraskas <58563081+shainaraskas@users.noreply.github.com> Date: Thu, 24 Oct 2024 13:26:15 -0400 Subject: [PATCH 28/60] Make a minor change to trigger release note process (#113975) * changelog entry --- docs/changelog/113975.yaml | 19 +++++++++++++++++++ docs/reference/mapping/params/format.asciidoc | 4 ++-- 2 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/113975.yaml diff --git a/docs/changelog/113975.yaml b/docs/changelog/113975.yaml new file mode 100644 index 0000000000000..632ba038271bb --- /dev/null +++ b/docs/changelog/113975.yaml @@ -0,0 +1,19 @@ +pr: 113975 +summary: JDK locale database change +area: Mapping +type: breaking +issues: [] +breaking: + title: JDK locale database change + area: Mapping + details: | + {es} 8.16 changes the version of the JDK that is included from version 22 to version 23. This changes the locale database that is used by Elasticsearch from the COMPAT database to the CLDR database. This change can cause significant differences to the textual date formats accepted by Elasticsearch, and to calculated week-dates. + + If you run {es} 8.16 on JDK version 22 or below, it will use the COMPAT locale database to match the behavior of 8.15. However, starting with {es} 9.0, {es} will use the CLDR database regardless of JDK version it is run on. + impact: | + This affects you if you use custom date formats using textual or week-date field specifiers. If you use date fields or calculated week-dates that change between the COMPAT and CLDR databases, then this change will cause Elasticsearch to reject previously valid date fields as invalid data. You might need to modify your ingest or output integration code to account for the differences between these two JDK versions. + + Starting in version 8.15.2, Elasticsearch will log deprecation warnings if you are using date format specifiers that might change on upgrading to JDK 23. These warnings are visible in Kibana. + + For detailed guidance, refer to <> and the https://ela.st/jdk-23-locales[Elastic blog]. + notable: true diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index 943e8fb879ff3..6c82b04eb5fe5 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -34,13 +34,13 @@ down to the nearest day. 
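For readers unfamiliar with the week-date behavior the changelog above describes, here is a minimal, self-contained Java sketch; the class name, date, and locale are illustrative and not part of this patch. Running it with -Djava.locale.providers=COMPAT (JDK 22 or earlier) and again with the default CLDR database shows how the locale's week definition, and therefore the `Y` and `w` specifiers, can resolve differently:

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.temporal.WeekFields;
import java.util.Locale;

public class WeekDateDemo {
    public static void main(String[] args) {
        // A date near a year boundary, where week-based years are most likely to diverge.
        LocalDate date = LocalDate.of(2023, 1, 1);
        Locale locale = Locale.forLanguageTag("en");
        // The locale database in use supplies the week definition queried below.
        WeekFields weekFields = WeekFields.of(locale);
        System.out.println("first day of week:  " + weekFields.getFirstDayOfWeek());
        System.out.println("min days in week 1: " + weekFields.getMinimalDaysInFirstWeek());
        // 'Y' (week-based year) and 'w' (week of week-based year) resolve against
        // that week definition, so the printed week-date follows the locale database.
        DateTimeFormatter weekDate = DateTimeFormatter.ofPattern("YYYY-'W'ww", locale);
        System.out.println(weekDate.format(date));
    }
}

Near a year boundary the same LocalDate can fall in the last week of the old week-year under one week definition and in week 1 of the new one under another, which is exactly the class of difference the deprecation warnings flag.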
Completely customizable date formats are supported. The syntax for these is explained in https://docs.oracle.com/en/java/javase/21/docs/api/java.base/java/time/format/DateTimeFormatter.html[DateTimeFormatter docs]. -Note that whilst the built-in formats for week dates use the ISO definition of weekyears, +Note that while the built-in formats for week dates use the ISO definition of weekyears, custom formatters using the `Y`, `W`, or `w` field specifiers use the JDK locale definition of weekyears. This can result in different values between the built-in formats and custom formats for week dates. [[built-in-date-formats]] -==== Built In Formats +==== Built-in formats Most of the below formats have a `strict` companion format, which means that year, month and day parts of the month must use respectively 4, 2 and 2 digits From e951984831cc499f5f13efee0d6283ee8957f295 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 24 Oct 2024 11:40:59 -0700 Subject: [PATCH 29/60] Reenable CacheFileTests (#115582) The test issue was fixed by #110807 closes #110801 --- muted-tests.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index ab5d686a041c1..827a604cd6a19 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -23,9 +23,6 @@ tests: - class: org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests method: testPopulationOfCacheWhenLoadingPrivilegesForAllApplications issue: https://github.com/elastic/elasticsearch/issues/110789 -- class: org.elasticsearch.xpack.searchablesnapshots.cache.common.CacheFileTests - method: testCacheFileCreatedAsSparseFile - issue: https://github.com/elastic/elasticsearch/issues/110801 - class: org.elasticsearch.nativeaccess.VectorSystemPropertyTests method: testSystemPropertyDisabled issue: https://github.com/elastic/elasticsearch/issues/110949 From ad9c5a0a0640f62f763f63682f7e321c4d68ab41 Mon Sep 17 00:00:00 2001 From: Pawan Kartik Date: Thu, 24 Oct 2024 20:15:17 +0100 Subject: [PATCH 30/60] Correctly update search status for a nonexistent local index (#115138) * fix: correctly update search status for a nonexistent local index * Check for cluster existence before updation * Remove unnecessary `println` * Address review comment: add an explanatory code comment * Further clarify code comment --- .../search/ccs/CrossClusterSearchIT.java | 64 +++++++++++++++++++ .../action/search/TransportSearchAction.java | 23 +++++++ 2 files changed, 87 insertions(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 5233a0cd564ef..5984e1acc89af 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -755,6 +755,70 @@ public void testNegativeRemoteIndexNameThrows() { assertNotNull(ee.getCause()); } + public void testClusterDetailsWhenLocalClusterHasNoMatchingIndex() throws Exception { + Map testClusterInfo = setupTwoClusters(); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); + + SearchRequest searchRequest = new SearchRequest("nomatch*", REMOTE_CLUSTER + ":" + remoteIndex); + if (randomBoolean()) { + searchRequest = searchRequest.scroll(TimeValue.timeValueMinutes(1)); + } + + searchRequest.allowPartialSearchResults(false); + if 
(randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + + boolean minimizeRoundtrips = false; + searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); + + boolean dfs = randomBoolean(); + if (dfs) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } + + searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); + assertResponse(client(LOCAL_CLUSTER).search(searchRequest), response -> { + assertNotNull(response); + + Clusters clusters = response.getClusters(); + assertFalse("search cluster results should BE successful", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SUCCESSFUL), equalTo(2)); + assertThat(clusters.getClusterStateCount(Cluster.Status.SKIPPED), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.RUNNING), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.PARTIAL), equalTo(0)); + assertThat(clusters.getClusterStateCount(Cluster.Status.FAILED), equalTo(0)); + + Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo("nomatch*")); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), equalTo(0L)); + + Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); + }); + } + private static void assertOneFailedShard(Cluster cluster, int totalShards) { assertNotNull(cluster); assertThat(cluster.getStatus(), equalTo(Cluster.Status.PARTIAL)); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 1645a378446a4..302c3e243a1f6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -1247,6 +1247,29 @@ private void executeSearch( indicesAndAliases, concreteLocalIndices ); + + // localShardIterators is empty since there are no matching indices. In such cases, + // we update the local cluster's status from RUNNING to SUCCESSFUL right away. 
Before + // we attempt to do that, we must ensure that the local cluster was specified in the user's + // search request. This is done by trying to fetch the local cluster via getCluster() and + // checking for a non-null return value. If the local cluster was never specified, its status + // update can be skipped. + if (localShardIterators.isEmpty() + && clusters != SearchResponse.Clusters.EMPTY + && clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY) != null) { + clusters.swapCluster( + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + (alias, v) -> new SearchResponse.Cluster.Builder(v).setStatus(SearchResponse.Cluster.Status.SUCCESSFUL) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0) + .setFailures(Collections.emptyList()) + .setTook(TimeValue.timeValueMillis(0)) + .setTimedOut(false) + .build() + ); + } } final GroupShardsIterator shardIterators = mergeShardsIterators(localShardIterators, remoteShardIterators); From 79cfcec065311165f7d491d164e99fed6c5cbeb9 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 21:26:02 +0200 Subject: [PATCH 31/60] Clarify the null check for retention leases (#114979) `MetadataStateFormat.FORMAT.loadLatestState` can actually return null when the state directory hasn't been initialized yet, so we have to keep the null check when loading retention leases during the initialization of the engine. See #39359 --- .../java/org/elasticsearch/gateway/MetadataStateFormat.java | 2 ++ .../org/elasticsearch/index/seqno/ReplicationTracker.java | 5 ++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java index 30b8d72b83f4c..3e68ec5243f5f 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.transport.Transports; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -485,6 +486,7 @@ public Tuple loadLatestStateWithGeneration(Logger logger, NamedXContent * @param dataLocations the data-locations to try. * @return the latest state or null if no state was found. */ + @Nullable public T loadLatestState(Logger logger, NamedXContentRegistry namedXContentRegistry, Path... 
dataLocations) throws IOException { return loadLatestStateWithGeneration(logger, namedXContentRegistry, dataLocations).v1(); } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index e67e878fd3827..f1e3ac270d959 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.WriteStateException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersions; @@ -471,9 +470,9 @@ public RetentionLeases loadRetentionLeases(final Path path) throws IOException { return emptyIfNull(retentionLeases); } - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_INDEXING) private static RetentionLeases emptyIfNull(RetentionLeases retentionLeases) { - // we expect never to see a null in 8.x, so adjust this to throw an exception from v9 onwards. + // `MetadataStateFormat.FORMAT.loadLatestState` can actually return null when the state directory + // on a node hasn't been initialized yet return retentionLeases == null ? RetentionLeases.EMPTY : retentionLeases; } From b4edc3ddab0ea910582c0dd0091ed5b147048280 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 24 Oct 2024 21:26:23 +0200 Subject: [PATCH 32/60] Remove loading on-disk cluster metadata from the manifest file (#114698) Since metadata storage was moved to Lucene in #50907 (7.16.0), we shouldn't encounter any on-disk global metadata files, so we can remove support for loading them. 
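The loader that remains, MetadataStateFormat.FORMAT.loadLatestState, is @Nullable (see the previous patch), so callers still normalize null to an explicit empty value. Below is a minimal, self-contained sketch of that guard; all types and file names here are hypothetical stand-ins, not the real Elasticsearch classes:

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

public class EmptyIfNullDemo {
    // Stand-in for RetentionLeases; not the real class.
    record Leases(List<String> ids) {
        static final Leases EMPTY = new Leases(List.of());
    }

    // Stand-in for MetadataStateFormat.FORMAT.loadLatestState(...): returns null
    // when the node's state directory has not been initialized yet.
    static Leases loadLatestState(Path dir) {
        return Files.exists(dir.resolve("leases.st")) ? new Leases(List.of("peer-1")) : null;
    }

    static Leases emptyIfNull(Leases leases) {
        return leases == null ? Leases.EMPTY : leases;
    }

    public static void main(String[] args) {
        Leases leases = emptyIfNull(loadLatestState(Path.of("uninitialized-node")));
        System.out.println(leases.ids()); // prints [] instead of failing with an NPE later on
    }
}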
--- .../gateway/GatewayIndexStateIT.java | 60 -------- .../gateway/GatewayMetaState.java | 13 -- .../gateway/MetaStateService.java | 119 +--------------- .../java/org/elasticsearch/node/Node.java | 1 - .../gateway/MetaStateServiceTests.java | 132 ------------------ .../gateway/MockGatewayMetaState.java | 8 -- 6 files changed, 1 insertion(+), 332 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 00bd350fe2b84..cdd5a52e048bd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.coordination.CoordinationMetadata; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -27,14 +26,9 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.IOUtils; -import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.indices.IndexClosedException; @@ -46,13 +40,8 @@ import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; -import java.nio.file.Path; import java.util.List; -import java.util.Map; import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -60,7 +49,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.notNullValue; @@ -545,52 +533,4 @@ public void testArchiveBrokenClusterSettings() throws Exception { assertHitCount(prepareSearch().setQuery(matchAllQuery()), 1L); } - public void testHalfDeletedIndexImport() throws Exception { - // It's possible for a 6.x node to add a tombstone for an index but not actually delete the index metadata from disk since that - // deletion is slightly deferred and may race against the node being shut down; if you upgrade to 7.x when in this state then the - // node won't start. 
- - final String nodeName = internalCluster().startNode(); - createIndex("test", 1, 0); - ensureGreen("test"); - - final Metadata metadata = internalCluster().getInstance(ClusterService.class).state().metadata(); - final Path[] paths = internalCluster().getInstance(NodeEnvironment.class).nodeDataPaths(); - final String nodeId = clusterAdmin().prepareNodesInfo(nodeName).clear().get().getNodes().get(0).getNode().getId(); - - writeBrokenMeta(nodeEnvironment -> { - for (final Path path : paths) { - IOUtils.rm(path.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)); - } - MetaStateWriterUtils.writeGlobalState( - nodeEnvironment, - "test", - Metadata.builder(metadata) - // we remove the manifest file, resetting the term and making this look like an upgrade from 6.x, so must also reset the - // term in the coordination metadata - .coordinationMetadata(CoordinationMetadata.builder(metadata.coordinationMetadata()).term(0L).build()) - // add a tombstone but do not delete the index metadata from disk - .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metadata.index("test").getIndex()).build()) - .build() - ); - NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(nodeId, BuildVersion.current(), metadata.oldestIndexVersion()), paths); - }); - - ensureGreen(); - - assertBusy(() -> assertThat(internalCluster().getInstance(NodeEnvironment.class).availableIndexFolders(), empty())); - } - - private void writeBrokenMeta(CheckedConsumer writer) throws Exception { - Map nodeEnvironments = Stream.of(internalCluster().getNodeNames()) - .collect(Collectors.toMap(Function.identity(), nodeName -> internalCluster().getInstance(NodeEnvironment.class, nodeName))); - internalCluster().fullRestart(new RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - final NodeEnvironment nodeEnvironment = nodeEnvironments.get(nodeName); - writer.accept(nodeEnvironment); - return super.onNodeStopped(nodeName); - } - }); - } } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index c863a5bac973a..a7baca59e1857 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; @@ -33,8 +32,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.index.IndexVersions; @@ -185,16 +182,6 @@ private PersistedState createOnDiskPersistedState( long lastAcceptedVersion = onDiskState.lastAcceptedVersion; long currentTerm = onDiskState.currentTerm; - if (onDiskState.empty()) { - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // legacy metadata loader is not needed anymore from v9 onwards - final Tuple legacyState = 
metaStateService.loadFullState(); - if (legacyState.v1().isEmpty() == false) { - metadata = legacyState.v2(); - lastAcceptedVersion = legacyState.v1().clusterStateVersion(); - currentTerm = legacyState.v1().currentTerm(); - } - } - PersistedState persistedState = null; boolean success = false; try { diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 4260ef51a3976..5f07deff31eea 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -12,22 +12,17 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.function.Predicate; /** @@ -45,118 +40,6 @@ public MetaStateService(NodeEnvironment nodeEnv, NamedXContentRegistry namedXCon this.namedXContentRegistry = namedXContentRegistry; } - /** - * Loads the full state, which includes both the global state and all the indices meta data.
- * When loading, manifest file is consulted (represented by {@link Manifest} class), to load proper generations.
- * If there is no manifest file on disk, this method fallbacks to BWC mode, where latest generation of global and indices - * metadata is loaded. Please note that currently there is no way to distinguish between manifest file being removed and manifest - * file was not yet created. It means that this method always fallbacks to BWC mode, if there is no manifest file. - * - * @return tuple of {@link Manifest} and {@link Metadata} with global metadata and indices metadata. If there is no state on disk, - * meta state with globalGeneration -1 and empty meta data is returned. - * @throws IOException if some IOException when loading files occurs or there is no metadata referenced by manifest file. - */ - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - public Tuple loadFullState() throws IOException { - final Manifest manifest = Manifest.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); - if (manifest == null) { - return loadFullStateBWC(); - } - - final Metadata.Builder metadataBuilder; - if (manifest.isGlobalGenerationMissing()) { - metadataBuilder = Metadata.builder(); - } else { - final Metadata globalMetadata = Metadata.FORMAT.loadGeneration( - logger, - namedXContentRegistry, - manifest.globalGeneration(), - nodeEnv.nodeDataPaths() - ); - if (globalMetadata != null) { - metadataBuilder = Metadata.builder(globalMetadata); - } else { - throw new IOException("failed to find global metadata [generation: " + manifest.globalGeneration() + "]"); - } - } - - for (Map.Entry entry : manifest.indexGenerations().entrySet()) { - final Index index = entry.getKey(); - final long generation = entry.getValue(); - final String indexFolderName = index.getUUID(); - final IndexMetadata indexMetadata = IndexMetadata.FORMAT.loadGeneration( - logger, - namedXContentRegistry, - generation, - nodeEnv.resolveIndexFolder(indexFolderName) - ); - if (indexMetadata != null) { - metadataBuilder.put(indexMetadata, false); - } else { - throw new IOException( - "failed to find metadata for existing index " - + index.getName() - + " [location: " - + indexFolderName - + ", generation: " - + generation - + "]" - ); - } - } - - return new Tuple<>(manifest, metadataBuilder.build()); - } - - /** - * "Manifest-less" BWC version of loading metadata from disk. 
See also {@link #loadFullState()} - */ - private Tuple loadFullStateBWC() throws IOException { - Map indices = new HashMap<>(); - Metadata.Builder metadataBuilder; - - Tuple metadataAndGeneration = Metadata.FORMAT.loadLatestStateWithGeneration( - logger, - namedXContentRegistry, - nodeEnv.nodeDataPaths() - ); - Metadata globalMetadata = metadataAndGeneration.v1(); - long globalStateGeneration = metadataAndGeneration.v2(); - - final IndexGraveyard indexGraveyard; - if (globalMetadata != null) { - metadataBuilder = Metadata.builder(globalMetadata); - indexGraveyard = globalMetadata.custom(IndexGraveyard.TYPE); - } else { - metadataBuilder = Metadata.builder(); - indexGraveyard = IndexGraveyard.builder().build(); - } - - for (String indexFolderName : nodeEnv.availableIndexFolders()) { - Tuple indexMetadataAndGeneration = IndexMetadata.FORMAT.loadLatestStateWithGeneration( - logger, - namedXContentRegistry, - nodeEnv.resolveIndexFolder(indexFolderName) - ); - IndexMetadata indexMetadata = indexMetadataAndGeneration.v1(); - long generation = indexMetadataAndGeneration.v2(); - if (indexMetadata != null) { - if (indexGraveyard.containsIndex(indexMetadata.getIndex())) { - logger.debug("[{}] found metadata for deleted index [{}]", indexFolderName, indexMetadata.getIndex()); - // this index folder is cleared up when state is recovered - } else { - indices.put(indexMetadata.getIndex(), generation); - metadataBuilder.put(indexMetadata, false); - } - } else { - logger.debug("[{}] failed to find metadata for existing index location", indexFolderName); - } - } - - Manifest manifest = Manifest.unknownCurrentTermAndVersion(globalStateGeneration, indices); - return new Tuple<>(manifest, metadataBuilder.build()); - } - /** * Loads the index state for the provided index name, returning null if doesn't exists. */ @@ -193,7 +76,7 @@ List loadIndicesStates(Predicate excludeIndexPathIdsPredi } /** - * Loads the global state, *without* index state, see {@link #loadFullState()} for that. 
+ * Loads the global state, *without* index state */ Metadata loadGlobalState() throws IOException { return Metadata.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.nodeDataPaths()); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index e30f76fdd9414..ec4a534fc883b 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -325,7 +325,6 @@ public Node start() throws NodeValidationException { // TODO: Do not expect that the legacy metadata file is always present https://github.com/elastic/elasticsearch/issues/95211 if (Assertions.ENABLED && DiscoveryNode.isStateless(settings()) == false) { try { - assert injector.getInstance(MetaStateService.class).loadFullState().v1().isEmpty(); final NodeMetadata nodeMetadata = NodeMetadata.FORMAT.loadLatestState( logger, NamedXContentRegistry.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java index 40c4e064216f1..1bbab8bf782bd 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java @@ -9,21 +9,15 @@ package org.elasticsearch.gateway; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Tuple; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; -import java.io.IOException; -import java.util.HashMap; - import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.nullValue; public class MetaStateServiceTests extends ESTestCase { @@ -75,130 +69,4 @@ public void testWriteGlobalStateWithIndexAndNoIndexIsLoaded() throws Exception { assertThat(metaStateService.loadGlobalState().persistentSettings(), equalTo(metadata.persistentSettings())); assertThat(metaStateService.loadGlobalState().hasIndex("test1"), equalTo(false)); } - - public void testLoadFullStateBWC() throws Exception { - IndexMetadata indexMetadata = indexMetadata("test1"); - Metadata metadata = Metadata.builder() - .persistentSettings(Settings.builder().put("test1", "value1").build()) - .put(indexMetadata, true) - .build(); - - long globalGeneration = MetaStateWriterUtils.writeGlobalState(env, "test_write", metadata); - long indexGeneration = MetaStateWriterUtils.writeIndex(env, "test_write", indexMetadata); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - Manifest manifest = manifestAndMetadata.v1(); - assertThat(manifest.globalGeneration(), equalTo(globalGeneration)); - assertThat(manifest.indexGenerations(), hasKey(indexMetadata.getIndex())); - assertThat(manifest.indexGenerations().get(indexMetadata.getIndex()), equalTo(indexGeneration)); - - Metadata loadedMetadata = manifestAndMetadata.v2(); - assertThat(loadedMetadata.persistentSettings(), equalTo(metadata.persistentSettings())); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(indexMetadata)); - } - - public void testLoadEmptyStateNoManifest() throws IOException { - Tuple 
manifestAndMetadata = metaStateService.loadFullState(); - - Manifest manifest = manifestAndMetadata.v1(); - assertTrue(manifest.isEmpty()); - - Metadata metadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(metadata, Metadata.EMPTY_METADATA)); - } - - public void testLoadEmptyStateWithManifest() throws IOException { - Manifest manifest = Manifest.empty(); - MetaStateWriterUtils.writeManifestAndCleanup(env, "test", manifest); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - assertTrue(manifestAndMetadata.v1().isEmpty()); - Metadata metadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(metadata, Metadata.EMPTY_METADATA)); - } - - public void testLoadFullStateMissingGlobalMetadata() throws IOException { - IndexMetadata index = indexMetadata("test1"); - long indexGeneration = MetaStateWriterUtils.writeIndex(env, "test", index); - Manifest manifest = new Manifest( - randomNonNegativeLong(), - randomNonNegativeLong(), - Manifest.empty().globalGeneration(), - new HashMap() { - { - put(index.getIndex(), indexGeneration); - } - } - ); - assertTrue(manifest.isGlobalGenerationMissing()); - MetaStateWriterUtils.writeManifestAndCleanup(env, "test", manifest); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - assertThat(manifestAndMetadata.v1(), equalTo(manifest)); - Metadata loadedMetadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(loadedMetadata, Metadata.EMPTY_METADATA)); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(index)); - } - - public void testLoadFullStateAndUpdateAndClean() throws IOException { - IndexMetadata index = indexMetadata("test1"); - Metadata metadata = Metadata.builder() - .persistentSettings(Settings.builder().put("test1", "value1").build()) - .put(index, true) - .build(); - - long globalGeneration = MetaStateWriterUtils.writeGlobalState(env, "first global state write", metadata); - long indexGeneration = MetaStateWriterUtils.writeIndex(env, "first index state write", index); - - Manifest manifest = new Manifest(randomNonNegativeLong(), randomNonNegativeLong(), globalGeneration, new HashMap() { - { - put(index.getIndex(), indexGeneration); - } - }); - MetaStateWriterUtils.writeManifestAndCleanup(env, "first manifest write", manifest); - - Metadata newMetadata = Metadata.builder() - .persistentSettings(Settings.builder().put("test1", "value2").build()) - .put(index, true) - .build(); - globalGeneration = MetaStateWriterUtils.writeGlobalState(env, "second global state write", newMetadata); - - Tuple manifestAndMetadata = metaStateService.loadFullState(); - assertThat(manifestAndMetadata.v1(), equalTo(manifest)); - - Metadata loadedMetadata = manifestAndMetadata.v2(); - assertThat(loadedMetadata.persistentSettings(), equalTo(metadata.persistentSettings())); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(index)); - - manifest = new Manifest(randomNonNegativeLong(), randomNonNegativeLong(), globalGeneration, new HashMap() { - { - put(index.getIndex(), indexGeneration); - } - }); - - MetaStateWriterUtils.writeManifestAndCleanup(env, "second manifest write", manifest); - Metadata.FORMAT.cleanupOldFiles(globalGeneration, env.nodeDataPaths()); - IndexMetadata.FORMAT.cleanupOldFiles(indexGeneration, env.indexPaths(index.getIndex())); - - manifestAndMetadata = metaStateService.loadFullState(); - assertThat(manifestAndMetadata.v1(), 
equalTo(manifest)); - - loadedMetadata = manifestAndMetadata.v2(); - assertThat(loadedMetadata.persistentSettings(), equalTo(newMetadata.persistentSettings())); - assertThat(loadedMetadata.hasIndex("test1"), equalTo(true)); - assertThat(loadedMetadata.index("test1"), equalTo(index)); - - if (randomBoolean()) { - metaStateService.unreferenceAll(); - } else { - metaStateService.deleteAll(); - } - manifestAndMetadata = metaStateService.loadFullState(); - assertTrue(manifestAndMetadata.v1().isEmpty()); - metadata = manifestAndMetadata.v2(); - assertTrue(Metadata.isGlobalStateEquals(metadata, Metadata.EMPTY_METADATA)); - } } diff --git a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java index d03396f9b53b3..64b468226e509 100644 --- a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; -import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; @@ -19,14 +18,12 @@ import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Tuple; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.plugins.MetadataUpgrader; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; -import java.io.IOException; import java.util.List; import static org.mockito.Mockito.mock; @@ -70,11 +67,6 @@ public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXCont new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); final MetaStateService metaStateService = mock(MetaStateService.class); - try { - when(metaStateService.loadFullState()).thenReturn(new Tuple<>(Manifest.empty(), Metadata.builder().build())); - } catch (IOException e) { - throw new AssertionError(e); - } start( settings, transportService, From e789039dfa8fee60dc2615c3876295ff7c6f3b01 Mon Sep 17 00:00:00 2001 From: Stanislav Malyshev Date: Thu, 24 Oct 2024 13:58:49 -0600 Subject: [PATCH 33/60] Fixing remote ENRICH by pushing the Enrich inside FragmentExec (#114665) * Fixing remote ENRICH by pushing the Enrich inside FragmentExec * Improve handling of more complex cases such as several enriches --- docs/changelog/114665.yaml | 6 ++ .../esql/action/CrossClustersEnrichIT.java | 102 ++++++++++++++++-- .../xpack/esql/analysis/Verifier.java | 7 -- .../xpack/esql/planner/Mapper.java | 42 ++++++++ .../optimizer/PhysicalPlanOptimizerTests.java | 63 +++++++++-- 5 files changed, 195 insertions(+), 25 deletions(-) create mode 100644 docs/changelog/114665.yaml diff --git a/docs/changelog/114665.yaml b/docs/changelog/114665.yaml new file mode 100644 index 0000000000000..b90bb799bd896 --- /dev/null +++ b/docs/changelog/114665.yaml @@ -0,0 +1,6 @@ +pr: 114665 +summary: Fixing remote ENRICH by pushing the Enrich inside `FragmentExec` +area: ES|QL +type: bug +issues: + - 105095 diff --git 
a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java index 7d8bb738098d3..e8e9f45694e9c 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java @@ -47,6 +47,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -469,27 +470,112 @@ public void testEnrichRemoteWithVendor() { } } + public void testEnrichRemoteWithVendorNoSort() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + + for (Enrich.Mode hostMode : List.of(Enrich.Mode.ANY, Enrich.Mode.REMOTE)) { + var query = String.format(Locale.ROOT, """ + FROM *:events,events + | LIMIT 100 + | eval ip= TO_STR(host) + | %s + | %s + | stats c = COUNT(*) by vendor + """, enrichHosts(hostMode), enrichVendors(Enrich.Mode.REMOTE)); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + var values = getValuesList(resp); + values.sort(Comparator.comparing(o -> (String) o.get(1), Comparator.nullsLast(Comparator.naturalOrder()))); + assertThat( + values, + equalTo( + List.of( + List.of(6L, "Apple"), + List.of(7L, "Microsoft"), + List.of(1L, "Redhat"), + List.of(2L, "Samsung"), + List.of(1L, "Sony"), + List.of(2L, "Suse"), + Arrays.asList(3L, (String) null) + ) + ) + ); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", "c1", "c2"))); + assertCCSExecutionInfoDetails(executionInfo); + } + } + } + public void testTopNThenEnrichRemote() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = includeCCSMetadata.v2(); + String query = String.format(Locale.ROOT, """ FROM *:events,events | eval ip= TO_STR(host) - | SORT ip + | SORT timestamp, user, ip | LIMIT 5 - | %s + | %s | KEEP host, timestamp, user, os """, enrichHosts(Enrich.Mode.REMOTE)); - var error = expectThrows(VerificationException.class, () -> runQuery(query, randomBoolean()).close()); - assertThat(error.getMessage(), containsString("ENRICH with remote policy can't be executed after LIMIT")); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + assertThat( + getValuesList(resp), + equalTo( + List.of( + List.of("192.168.1.2", 1L, "andres", "Windows"), + List.of("192.168.1.3", 1L, "matthew", "MacOS"), + Arrays.asList("192.168.1.25", 1L, "park", (String) null), + List.of("192.168.1.5", 2L, "akio", "Android"), + List.of("192.168.1.6", 2L, "sergio", "iOS") + ) + ) + ); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", "c1", "c2"))); + assertCCSExecutionInfoDetails(executionInfo); + } } public void testLimitThenEnrichRemote() { + Tuple includeCCSMetadata = randomIncludeCCSMetadata(); + Boolean requestIncludeMeta = includeCCSMetadata.v1(); + boolean responseExpectMeta = 
includeCCSMetadata.v2(); + String query = String.format(Locale.ROOT, """ FROM *:events,events - | LIMIT 10 + | LIMIT 25 | eval ip= TO_STR(host) - | %s + | %s | KEEP host, timestamp, user, os """, enrichHosts(Enrich.Mode.REMOTE)); - var error = expectThrows(VerificationException.class, () -> runQuery(query, randomBoolean()).close()); - assertThat(error.getMessage(), containsString("ENRICH with remote policy can't be executed after LIMIT")); + try (EsqlQueryResponse resp = runQuery(query, requestIncludeMeta)) { + var values = getValuesList(resp); + values.sort( + Comparator.comparingLong((List o) -> (Long) o.get(1)) + .thenComparing(o -> (String) o.get(0)) + .thenComparing(o -> (String) o.get(2)) + ); + assertThat( + values.subList(0, 5), + equalTo( + List.of( + List.of("192.168.1.2", 1L, "andres", "Windows"), + Arrays.asList("192.168.1.25", 1L, "park", (String) null), + List.of("192.168.1.3", 1L, "matthew", "MacOS"), + List.of("192.168.1.5", 2L, "akio", "Android"), + List.of("192.168.1.5", 2L, "simon", "Android") + ) + ) + ); + EsqlExecutionInfo executionInfo = resp.getExecutionInfo(); + assertThat(executionInfo.includeCCSMetadata(), equalTo(responseExpectMeta)); + assertThat(executionInfo.clusterAliases(), equalTo(Set.of("", "c1", "c2"))); + assertCCSExecutionInfoDetails(executionInfo); + } } public void testAggThenEnrichRemote() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index e2717cd9af0d1..fbaf43467a2e7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -609,22 +609,15 @@ private static void checkForSortableDataTypes(LogicalPlan p, Set localF */ private static void checkRemoteEnrich(LogicalPlan plan, Set failures) { boolean[] agg = { false }; - boolean[] limit = { false }; boolean[] enrichCoord = { false }; plan.forEachUp(UnaryPlan.class, u -> { - if (u instanceof Limit) { - limit[0] = true; // TODO: Make Limit then enrich_remote work - } if (u instanceof Aggregate) { agg[0] = true; } else if (u instanceof Enrich enrich && enrich.mode() == Enrich.Mode.COORDINATOR) { enrichCoord[0] = true; } if (u instanceof Enrich enrich && enrich.mode() == Enrich.Mode.REMOTE) { - if (limit[0]) { - failures.add(fail(enrich, "ENRICH with remote policy can't be executed after LIMIT")); - } if (agg[0]) { failures.add(fail(enrich, "ENRICH with remote policy can't be executed after STATS")); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java index e571be54692c4..152c492a34433 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java @@ -52,8 +52,10 @@ import org.elasticsearch.xpack.esql.plan.physical.RowExec; import org.elasticsearch.xpack.esql.plan.physical.ShowExec; import org.elasticsearch.xpack.esql.plan.physical.TopNExec; +import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; /** *

This class is part of the planner

@@ -104,6 +106,46 @@ public PhysicalPlan map(LogicalPlan p) { // // Unary Plan // + if (localMode == false && p instanceof Enrich enrich && enrich.mode() == Enrich.Mode.REMOTE) { + // When we have remote enrich, we want to put it under FragmentExec, so it would be executed remotely. + // We're only going to do it on the coordinator node. + // The way we're going to do it is as follows: + // 1. Locate FragmentExec in the tree. If we have no FragmentExec, we won't do anything. + // 2. Put this Enrich under it, removing everything that was below it previously. + // 3. Above FragmentExec, we should deal with pipeline breakers, since pipeline ops already are supposed to go under + // FragmentExec. + // 4. Aggregates can't appear here since the plan should have errored out if we have aggregate inside remote Enrich. + // 5. So we should be keeping: LimitExec, ExchangeExec, OrderExec, TopNExec (actually OrderExec probably can't happen anyway). + + var child = map(enrich.child()); + AtomicBoolean hasFragment = new AtomicBoolean(false); + + var childTransformed = child.transformUp((f) -> { + // Once we reached FragmentExec, we stuff our Enrich under it + if (f instanceof FragmentExec) { + hasFragment.set(true); + return new FragmentExec(p); + } + if (f instanceof EnrichExec enrichExec) { + // It can only be ANY because COORDINATOR would have errored out earlier, and REMOTE should be under FragmentExec + assert enrichExec.mode() == Enrich.Mode.ANY : "enrich must be in ANY mode here"; + return enrichExec.child(); + } + if (f instanceof UnaryExec unaryExec) { + if (f instanceof LimitExec || f instanceof ExchangeExec || f instanceof OrderExec || f instanceof TopNExec) { + return f; + } else { + return unaryExec.child(); + } + } + // Currently, it's either UnaryExec or LeafExec. Leaf will either resolve to FragmentExec or we'll ignore it. 
+ return f; + }); + + if (hasFragment.get()) { + return childTransformed; + } + } if (p instanceof UnaryPlan ua) { var child = map(ua.child()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 964039268e30d..961c70acada7b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -172,7 +172,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -// @TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") +// @TestLogging(value = "org.elasticsearch.xpack.esql:DEBUG", reason = "debug") public class PhysicalPlanOptimizerTests extends ESTestCase { private static final String PARAM_FORMATTING = "%1$s"; @@ -5851,14 +5851,14 @@ public void testEnrichBeforeLimit() { | EVAL employee_id = to_str(emp_no) | ENRICH _remote:departments | LIMIT 10"""); - var enrich = as(plan, EnrichExec.class); - assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); - assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); - var eval = as(enrich.child(), EvalExec.class); - var finalLimit = as(eval.child(), LimitExec.class); + var finalLimit = as(plan, LimitExec.class); var exchange = as(finalLimit.child(), ExchangeExec.class); var fragment = as(exchange.child(), FragmentExec.class); - var partialLimit = as(fragment.fragment(), Limit.class); + var enrich = as(fragment.fragment(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var evalFragment = as(enrich.child(), Eval.class); + var partialLimit = as(evalFragment.child(), Limit.class); as(partialLimit.child(), EsRelation.class); } } @@ -5901,13 +5901,21 @@ public void testLimitThenEnrich() { } public void testLimitThenEnrichRemote() { - var error = expectThrows(VerificationException.class, () -> physicalPlan(""" + var plan = physicalPlan(""" FROM test | LIMIT 10 | EVAL employee_id = to_str(emp_no) | ENRICH _remote:departments - """)); - assertThat(error.getMessage(), containsString("line 4:3: ENRICH with remote policy can't be executed after LIMIT")); + """); + var finalLimit = as(plan, LimitExec.class); + var exchange = as(finalLimit.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var enrich = as(fragment.fragment(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var evalFragment = as(enrich.child(), Eval.class); + var partialLimit = as(evalFragment.child(), Limit.class); + as(partialLimit.child(), EsRelation.class); } public void testEnrichBeforeTopN() { @@ -5961,6 +5969,23 @@ public void testEnrichBeforeTopN() { var eval = as(enrich.child(), Eval.class); as(eval.child(), EsRelation.class); } + { + var plan = physicalPlan(""" + FROM test + | EVAL employee_id = to_str(emp_no) + | ENRICH _remote:departments + | SORT department + | LIMIT 10"""); + var topN = as(plan, TopNExec.class); + var exchange = as(topN.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var partialTopN = as(fragment.fragment(), 
TopN.class); + var enrich = as(partialTopN.child(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var eval = as(enrich.child(), Eval.class); + as(eval.child(), EsRelation.class); + } } public void testEnrichAfterTopN() { @@ -6000,6 +6025,24 @@ public void testEnrichAfterTopN() { var partialTopN = as(fragment.fragment(), TopN.class); as(partialTopN.child(), EsRelation.class); } + { + var plan = physicalPlan(""" + FROM test + | SORT emp_no + | LIMIT 10 + | EVAL employee_id = to_str(emp_no) + | ENRICH _remote:departments + """); + var topN = as(plan, TopNExec.class); + var exchange = as(topN.child(), ExchangeExec.class); + var fragment = as(exchange.child(), FragmentExec.class); + var enrich = as(fragment.fragment(), Enrich.class); + assertThat(enrich.mode(), equalTo(Enrich.Mode.REMOTE)); + assertThat(enrich.concreteIndices(), equalTo(Map.of("cluster_1", ".enrich-departments-2"))); + var evalFragment = as(enrich.child(), Eval.class); + var partialTopN = as(evalFragment.child(), TopN.class); + as(partialTopN.child(), EsRelation.class); + } } public void testManyEnrich() { From cade0021736d69f66db4bc73c022258833c3ff38 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Thu, 24 Oct 2024 15:34:27 -0500 Subject: [PATCH 34/60] Fixing ingest simulate yaml rest test when global legacy template is present (#115586) Sometimes the test framework adds a global legacy template. When this happens, a test that is using another legacy template to create an index emits a warning since the index matches two legacy templates. This PR allows that warning. --- .../resources/rest-api-spec/test/ingest/80_ingest_simulate.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index 7ed5ad3154151..2d3fa6b568381 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -1537,6 +1537,8 @@ setup: - not_exists: docs.0.doc.error - do: + allowed_warnings: + - "index [foo-1] matches multiple legacy templates [global, my-legacy-template], composable templates will only match a single template" indices.create: index: foo-1 - match: { acknowledged: true } From d1c7e9886f483f2865b7780ce0ba44689fae622e Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 24 Oct 2024 21:43:22 +0100 Subject: [PATCH 35/60] Update BlobCacheBufferedIndexInput::readVLong to correctly handle negative long values (#115594) --- docs/changelog/115594.yaml | 6 ++++++ .../blobcache/common/BlobCacheBufferedIndexInput.java | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/115594.yaml diff --git a/docs/changelog/115594.yaml b/docs/changelog/115594.yaml new file mode 100644 index 0000000000000..91a6089dfb3ce --- /dev/null +++ b/docs/changelog/115594.yaml @@ -0,0 +1,6 @@ +pr: 115594 +summary: Update `BlobCacheBufferedIndexInput::readVLong` to correctly handle negative + long values +area: Search +type: bug +issues: [] diff --git 
a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java index 16645e7523c36..7e7e954d1fa72 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java @@ -175,7 +175,7 @@ public final int readVInt() throws IOException { @Override public final long readVLong() throws IOException { - if (9 <= buffer.remaining()) { + if (10 <= buffer.remaining()) { return ByteBufferStreamInput.readVLong(buffer); } else { return super.readVLong(); From f444c86f857db0f82f528d217bf0da6f5b9308c5 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 24 Oct 2024 13:47:20 -0700 Subject: [PATCH 36/60] Add lookup index mode (#115143) This change introduces a new index mode, lookup, for indices intended for lookup operations in ES|QL. Lookup indices must have a single shard and be replicated to all data nodes by default. Aside from these requirements, they function as standard indices. Documentation will be added later when the lookup operator in ES|QL is implemented. --- .../test/indices.create/10_basic.yml | 67 ++++++ .../index/LookupIndexModeIT.java | 219 ++++++++++++++++++ .../org/elasticsearch/TransportVersions.java | 1 + .../metadata/MetadataCreateIndexService.java | 16 +- .../org/elasticsearch/index/IndexMode.java | 115 ++++++++- .../monitor/metrics/IndicesMetrics.java | 2 +- .../elasticsearch/node/NodeConstruction.java | 10 +- .../indices/CreateIndexCapabilities.java | 7 +- .../index/mapper/MapperServiceTestCase.java | 2 +- .../index/engine/FollowingEngineTests.java | 3 + 10 files changed, 436 insertions(+), 6 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml index 8242b7cdd29e7..31d127b80c844 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -149,3 +149,70 @@ indices.exists_alias: name: logs_2022-12-31 - is_true: '' + +--- +"Create lookup index": + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ lookup_index_mode ] + reason: "Support for 'lookup' index mode capability required" + - do: + indices.create: + index: "test_lookup" + body: + settings: + index.mode: lookup + + - do: + indices.get_settings: + index: test_lookup + + - match: { test_lookup.settings.index.number_of_shards: "1"} + - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"} + +--- +"Create lookup index with one shard": + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ lookup_index_mode ] + reason: "Support for 'lookup' index mode capability required" + - do: + indices.create: + index: "test_lookup" + body: + settings: + index: + mode: lookup + number_of_shards: 1 + + - do: + indices.get_settings: + index: test_lookup + + - match: { test_lookup.settings.index.number_of_shards: "1"} + - match: { test_lookup.settings.index.auto_expand_replicas: "0-all"} + +--- 
+"Create lookup index with two shards": + - requires: + test_runner_features: [ capabilities ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ lookup_index_mode ] + reason: "Support for 'lookup' index mode capability required" + - do: + catch: /illegal_argument_exception/ + indices.create: + index: test_lookup + body: + settings: + index.mode: lookup + index.number_of_shards: 2 + diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java new file mode 100644 index 0000000000000..f294d4a2e7943 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/LookupIndexModeIT.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.index; + +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.admin.indices.shrink.ResizeAction; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Map; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class LookupIndexModeIT extends ESIntegTestCase { + + @Override + protected int numberOfShards() { + return 1; + } + + public void testBasic() { + internalCluster().ensureAtLeastNumDataNodes(1); + Settings.Builder lookupSettings = Settings.builder().put("index.mode", "lookup"); + if (randomBoolean()) { + lookupSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + } + CreateIndexRequest createRequest = new CreateIndexRequest("hosts"); + createRequest.settings(lookupSettings); + createRequest.simpleMapping("ip", "type=ip", "os", "type=keyword"); + assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createRequest)); + Settings settings = client().admin().indices().prepareGetSettings("hosts").get().getIndexToSettings().get("hosts"); + assertThat(settings.get("index.mode"), equalTo("lookup")); + assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all")); + Map allHosts = Map.of( + "192.168.1.2", + "Windows", + "192.168.1.3", + "MacOS", + "192.168.1.4", + "Linux", + "192.168.1.5", + "Android", + "192.168.1.6", + "iOS", + "192.168.1.7", + "Windows", + "192.168.1.8", + "MacOS", + "192.168.1.9", + "Linux", 
+ "192.168.1.10", + "Linux", + "192.168.1.11", + "Windows" + ); + for (Map.Entry e : allHosts.entrySet()) { + client().prepareIndex("hosts").setSource("ip", e.getKey(), "os", e.getValue()).get(); + } + refresh("hosts"); + assertAcked(client().admin().indices().prepareCreate("events").setSettings(Settings.builder().put("index.mode", "logsdb")).get()); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + String ip = randomFrom(allHosts.keySet()); + String message = randomFrom("login", "logout", "shutdown", "restart"); + client().prepareIndex("events").setSource("@timestamp", "2024-01-01", "ip", ip, "message", message).get(); + } + refresh("events"); + // _search + { + SearchResponse resp = prepareSearch("events", "hosts").setQuery(new MatchQueryBuilder("_index_mode", "lookup")) + .setSize(10000) + .get(); + for (SearchHit hit : resp.getHits()) { + assertThat(hit.getIndex(), equalTo("hosts")); + } + assertHitCount(resp, allHosts.size()); + resp.decRef(); + } + // field_caps + { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); + request.indices("events", "hosts"); + request.fields("*"); + request.setMergeResults(false); + request.indexFilter(new MatchQueryBuilder("_index_mode", "lookup")); + var resp = client().fieldCaps(request).actionGet(); + assertThat(resp.getIndexResponses(), hasSize(1)); + FieldCapabilitiesIndexResponse indexResponse = resp.getIndexResponses().getFirst(); + assertThat(indexResponse.getIndexMode(), equalTo(IndexMode.LOOKUP)); + assertThat(indexResponse.getIndexName(), equalTo("hosts")); + } + } + + public void testRejectMoreThanOneShard() { + int numberOfShards = between(2, 5); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> { + client().admin() + .indices() + .prepareCreate("hosts") + .setSettings(Settings.builder().put("index.mode", "lookup").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)) + .setMapping("ip", "type=ip", "os", "type=keyword") + .get(); + }); + assertThat( + error.getMessage(), + equalTo("index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided " + numberOfShards) + ); + } + + public void testResizeLookupIndex() { + Settings.Builder createSettings = Settings.builder().put("index.mode", "lookup"); + if (randomBoolean()) { + createSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + } + CreateIndexRequest createIndexRequest = new CreateIndexRequest("lookup-1").settings(createSettings); + assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createIndexRequest)); + client().admin().indices().prepareAddBlock(IndexMetadata.APIBlock.WRITE, "lookup-1").get(); + + ResizeRequest clone = new ResizeRequest("lookup-2", "lookup-1"); + clone.setResizeType(ResizeType.CLONE); + assertAcked(client().admin().indices().execute(ResizeAction.INSTANCE, clone).actionGet()); + Settings settings = client().admin().indices().prepareGetSettings("lookup-2").get().getIndexToSettings().get("lookup-2"); + assertThat(settings.get("index.mode"), equalTo("lookup")); + assertThat(settings.get("index.number_of_shards"), equalTo("1")); + assertThat(settings.get("index.auto_expand_replicas"), equalTo("0-all")); + + ResizeRequest split = new ResizeRequest("lookup-3", "lookup-1"); + split.setResizeType(ResizeType.SPLIT); + split.getTargetIndexRequest().settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3)); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> 
client().admin().indices().execute(ResizeAction.INSTANCE, split).actionGet() + ); + assertThat( + error.getMessage(), + equalTo("index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided 3") + ); + } + + public void testResizeRegularIndexToLookup() { + String dataNode = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin() + .indices() + .prepareCreate("regular-1") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put("index.routing.allocation.require._name", dataNode) + ) + .setMapping("ip", "type=ip", "os", "type=keyword") + .get() + ); + client().admin().indices().prepareAddBlock(IndexMetadata.APIBlock.WRITE, "regular-1").get(); + client().admin() + .indices() + .prepareUpdateSettings("regular-1") + .setSettings(Settings.builder().put("index.number_of_replicas", 0)) + .get(); + + ResizeRequest clone = new ResizeRequest("lookup-3", "regular-1"); + clone.setResizeType(ResizeType.CLONE); + clone.getTargetIndexRequest().settings(Settings.builder().put("index.mode", "lookup")); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().execute(ResizeAction.INSTANCE, clone).actionGet() + ); + assertThat( + error.getMessage(), + equalTo("index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided 2") + ); + + ResizeRequest shrink = new ResizeRequest("lookup-4", "regular-1"); + shrink.setResizeType(ResizeType.SHRINK); + shrink.getTargetIndexRequest() + .settings(Settings.builder().put("index.mode", "lookup").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)); + + error = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().execute(ResizeAction.INSTANCE, shrink).actionGet() + ); + assertThat(error.getMessage(), equalTo("can't change index.mode of index [regular-1] from [standard] to [lookup]")); + } + + public void testDoNotOverrideAutoExpandReplicas() { + internalCluster().ensureAtLeastNumDataNodes(1); + Settings.Builder createSettings = Settings.builder().put("index.mode", "lookup"); + if (randomBoolean()) { + createSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + } + createSettings.put("index.auto_expand_replicas", "3-5"); + CreateIndexRequest createRequest = new CreateIndexRequest("hosts"); + createRequest.settings(createSettings); + createRequest.simpleMapping("ip", "type=ip", "os", "type=keyword"); + assertAcked(client().admin().indices().execute(TransportCreateIndexAction.TYPE, createRequest)); + Settings settings = client().admin().indices().prepareGetSettings("hosts").get().getIndexToSettings().get("hosts"); + assertThat(settings.get("index.mode"), equalTo("lookup")); + assertThat(settings.get("index.auto_expand_replicas"), equalTo("3-5")); + } +} diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 777ff083f33f8..25bb792d827a9 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -181,6 +181,7 @@ static TransportVersion def(int id) { public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ = def(8_776_00_0); public static final TransportVersion SIMULATE_MAPPING_ADDITION = def(8_777_00_0); public static final TransportVersion INTRODUCE_ALL_APPLICABLE_SELECTOR = def(8_778_00_0); + public static final TransportVersion INDEX_MODE_LOOKUP = def(8_779_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 69e3b7b70ff82..ed029db54bf06 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -308,7 +308,12 @@ private void onlyCreateIndex( final CreateIndexClusterStateUpdateRequest request, final ActionListener listener ) { - normalizeRequestSetting(request); + try { + normalizeRequestSetting(request); + } catch (Exception e) { + listener.onFailure(e); + return; + } var delegate = new AllocationActionListener<>(listener, threadPool.getThreadContext()); submitUnbatchedTask( @@ -1599,6 +1604,15 @@ static IndexMetadata validateResize( // of if the source shards are divisible by the number of target shards IndexMetadata.getRoutingFactor(sourceMetadata.getNumberOfShards(), INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); } + if (targetIndexSettings.hasValue(IndexSettings.MODE.getKey())) { + IndexMode oldMode = Objects.requireNonNullElse(sourceMetadata.getIndexMode(), IndexMode.STANDARD); + IndexMode newMode = IndexSettings.MODE.get(targetIndexSettings); + if (newMode != oldMode) { + throw new IllegalArgumentException( + "can't change index.mode of index [" + sourceIndex + "] from [" + oldMode + "] to [" + newMode + "]" + ); + } + } return sourceMetadata; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 75ec67f26dd3a..e6339344b6e5f 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -9,7 +9,9 @@ package org.elasticsearch.index; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.common.compress.CompressedXContent; @@ -37,8 +39,10 @@ import org.elasticsearch.index.mapper.TsidExtractingIdFieldMapper; import java.io.IOException; +import java.time.Instant; import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.BooleanSupplier; @@ -308,6 +312,78 @@ public SourceFieldMapper.Mode defaultSourceMode() { public String getDefaultCodec() { return CodecService.BEST_COMPRESSION_CODEC; } + }, + LOOKUP("lookup") { + @Override + void validateWithOtherSettings(Map, Object> settings) { + final Integer providedNumberOfShards = (Integer) settings.get(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING); + if (providedNumberOfShards != null && providedNumberOfShards != 1) { + throw new IllegalArgumentException( + "index with [lookup] mode must have [index.number_of_shards] set to 1 or unset; provided " + providedNumberOfShards + ); + } + } + + @Override + public void validateMapping(MappingLookup lookup) {}; + + @Override + public void validateAlias(@Nullable String indexRouting, @Nullable String searchRouting) {} + + @Override + public void validateTimestampFieldMapping(boolean isDataStream, MappingLookup mappingLookup) { + + } + + @Override + public CompressedXContent getDefaultMapping(final IndexSettings indexSettings) { + return null; + } + + 
@Override + public TimestampBounds getTimestampBound(IndexMetadata indexMetadata) { + return null; + } + + @Override + public MetadataFieldMapper timeSeriesIdFieldMapper() { + // non time-series indices must not have a TimeSeriesIdFieldMapper + return null; + } + + @Override + public MetadataFieldMapper timeSeriesRoutingHashFieldMapper() { + // non time-series indices must not have a TimeSeriesRoutingIdFieldMapper + return null; + } + + @Override + public IdFieldMapper idFieldMapperWithoutFieldData() { + return ProvidedIdFieldMapper.NO_FIELD_DATA; + } + + @Override + public IdFieldMapper buildIdFieldMapper(BooleanSupplier fieldDataEnabled) { + return new ProvidedIdFieldMapper(fieldDataEnabled); + } + + @Override + public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { + return DocumentDimensions.Noop.INSTANCE; + } + + @Override + public boolean shouldValidateTimestamp() { + return false; + } + + @Override + public void validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper) {} + + @Override + public SourceFieldMapper.Mode defaultSourceMode() { + return SourceFieldMapper.Mode.STORED; + } }; private static final String HOST_NAME = "host.name"; @@ -370,6 +446,7 @@ private static CompressedXContent createDefaultMapping(boolean includeHostName) static final List> VALIDATE_WITH_SETTINGS = List.copyOf( Stream.concat( Stream.of( + IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING, IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING, IndexMetadata.INDEX_ROUTING_PATH, IndexSettings.TIME_SERIES_START_TIME, @@ -476,11 +553,12 @@ public static IndexMode fromString(String value) { case "standard" -> IndexMode.STANDARD; case "time_series" -> IndexMode.TIME_SERIES; case "logsdb" -> IndexMode.LOGSDB; + case "lookup" -> IndexMode.LOOKUP; default -> throw new IllegalArgumentException( "[" + value + "] is an invalid index mode, valid modes are: [" - + Arrays.stream(IndexMode.values()).map(IndexMode::toString).collect(Collectors.joining()) + + Arrays.stream(IndexMode.values()).map(IndexMode::toString).collect(Collectors.joining(",")) + "]" ); }; @@ -492,6 +570,7 @@ public static IndexMode readFrom(StreamInput in) throws IOException { case 0 -> STANDARD; case 1 -> TIME_SERIES; case 2 -> LOGSDB; + case 3 -> LOOKUP; default -> throw new IllegalStateException("unexpected index mode [" + mode + "]"); }; } @@ -501,6 +580,7 @@ public static void writeTo(IndexMode indexMode, StreamOutput out) throws IOExcep case STANDARD -> 0; case TIME_SERIES -> 1; case LOGSDB -> 2; + case LOOKUP -> out.getTransportVersion().onOrAfter(TransportVersions.INDEX_MODE_LOOKUP) ? 3 : 0; }; out.writeByte((byte) code); } @@ -509,4 +589,37 @@ public static void writeTo(IndexMode indexMode, StreamOutput out) throws IOExcep public String toString() { return getName(); } + + /** + * A built-in index setting provider that supplies additional index settings based on the index mode. + * Currently, only the lookup index mode provides non-empty additional settings. 
+ */ + public static final class IndexModeSettingsProvider implements IndexSettingProvider { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + IndexMode indexMode = templateIndexMode; + if (indexMode == null) { + String modeName = indexTemplateAndCreateRequestSettings.get(IndexSettings.MODE.getKey()); + if (modeName != null) { + indexMode = IndexMode.valueOf(modeName.toUpperCase(Locale.ROOT)); + } + } + if (indexMode == LOOKUP) { + return Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all") + .build(); + } else { + return Settings.EMPTY; + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java b/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java index 11df8710fad6c..ba67bc03e1441 100644 --- a/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java +++ b/server/src/main/java/org/elasticsearch/monitor/metrics/IndicesMetrics.java @@ -55,7 +55,7 @@ public IndicesMetrics(MeterRegistry meterRegistry, IndicesService indicesService } private static List registerAsyncMetrics(MeterRegistry registry, IndicesStatsCache cache) { - final int TOTAL_METRICS = 36; + final int TOTAL_METRICS = 48; List metrics = new ArrayList<>(TOTAL_METRICS); for (IndexMode indexMode : IndexMode.values()) { String name = indexMode.getName(); diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 7e3991c1df1f4..784e02059823b 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -80,6 +80,7 @@ import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; @@ -108,6 +109,7 @@ import org.elasticsearch.health.node.tracker.RepositoriesHealthTracker; import org.elasticsearch.health.stats.HealthApiStats; import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.index.IndexingPressure; @@ -820,7 +822,10 @@ private void construct( final var parameters = new IndexSettingProvider.Parameters(indicesService::createIndexMapperServiceForValidation); IndexSettingProviders indexSettingProviders = new IndexSettingProviders( - pluginsService.flatMap(p -> p.getAdditionalIndexSettingProviders(parameters)).collect(Collectors.toSet()) + Sets.union( + builtinIndexSettingProviders(), + pluginsService.flatMap(p -> p.getAdditionalIndexSettingProviders(parameters)).collect(Collectors.toSet()) + ) ); final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService); @@ -1656,4 +1661,7 @@ private Module loadPersistentTasksService( }; } + private Set builtinIndexSettingProviders() { + return Set.of(new IndexMode.IndexModeSettingsProvider()); + } } diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java index 899486399af6b..900a352d42f30 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/CreateIndexCapabilities.java @@ -21,5 +21,10 @@ public class CreateIndexCapabilities { */ private static final String LOGSDB_INDEX_MODE_CAPABILITY = "logsdb_index_mode"; - public static Set CAPABILITIES = Set.of(LOGSDB_INDEX_MODE_CAPABILITY); + /** + * Support lookup index mode + */ + private static final String LOOKUP_INDEX_MODE_CAPABILITY = "lookup_index_mode"; + + public static Set CAPABILITIES = Set.of(LOGSDB_INDEX_MODE_CAPABILITY, LOOKUP_INDEX_MODE_CAPABILITY); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index da04f30ff8023..3960aa5a91cc5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -142,7 +142,7 @@ protected static String randomIndexOptions() { protected final DocumentMapper createDocumentMapper(XContentBuilder mappings, IndexMode indexMode) throws IOException { return switch (indexMode) { - case STANDARD -> createDocumentMapper(mappings); + case STANDARD, LOOKUP -> createDocumentMapper(mappings); case TIME_SERIES -> createTimeSeriesModeDocumentMapper(mappings); case LOGSDB -> createLogsModeDocumentMapper(mappings); }; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index 478a0d08d6612..150eddf039cec 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -700,6 +700,9 @@ public void testProcessOnceOnPrimary() throws Exception { case LOGSDB: settingsBuilder.put("index.mode", IndexMode.LOGSDB.getName()); break; + case LOOKUP: + settingsBuilder.put("index.mode", IndexMode.LOOKUP.getName()); + break; default: throw new UnsupportedOperationException("Unknown index mode [" + indexMode + "]"); } From 057062bcae2b935294d3b9e91cdffdecd2a34208 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 08:09:46 +1100 Subject: [PATCH 37/60] Mute org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} #115600 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 827a604cd6a19..4af02859d88d4 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -273,6 +273,9 @@ tests: - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT method: test {yaml=reference/esql/esql-across-clusters/line_197} issue: https://github.com/elastic/elasticsearch/issues/115575 +- class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT + method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} + issue: 
https://github.com/elastic/elasticsearch/issues/115600 # Examples: # From d5265bef572eaa87cc07b861ad00c74f8a955fbf Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 24 Oct 2024 23:17:06 +0200 Subject: [PATCH 38/60] Replace IndexNameExpressionResolver.ExpressionList with imperative logic (#115487) The approach taken by `ExpressionList` becomes very expensive for large numbers of indices/datastreams. It implies that large lists of concrete names (as they are passed down from the transport layer via e.g. security) are copied at least twice during iteration. Removing the intermediary list and inlining the logic brings down the latency of searches targeting many shards/indices at once and allows for subsequent optimizations. The removed tests appear redundant as they tested an implementation detail of the IndexNameExpressionResolver which itself is well covered by its own tests. --- .../metadata/IndexNameExpressionResolver.java | 186 +++++------ .../cluster/metadata/ExpressionListTests.java | 309 ------------------ 2 files changed, 85 insertions(+), 410 deletions(-) delete mode 100644 server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 2229166a2d779..39499253c8790 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -48,7 +48,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -253,7 +252,7 @@ protected static Collection resolveExpressions(Context context, String.. } else { return ExplicitResourceNameFilter.filterUnavailable( context, - DateMathExpressionResolver.resolve(context, List.of(expressions)) + DateMathExpressionResolver.resolve(context, Arrays.asList(expressions)) ); } } else { @@ -264,7 +263,10 @@ protected static Collection resolveExpressions(Context context, String..
} else { return WildcardExpressionResolver.resolve( context, - ExplicitResourceNameFilter.filterUnavailable(context, DateMathExpressionResolver.resolve(context, List.of(expressions))) + ExplicitResourceNameFilter.filterUnavailable( + context, + DateMathExpressionResolver.resolve(context, Arrays.asList(expressions)) + ) ); } } @@ -1294,34 +1296,51 @@ private static boolean shouldIncludeIfAlias(IndexAbstraction ia, IndexNameExpres * */ public static Collection resolve(Context context, List expressions) { - ExpressionList expressionList = new ExpressionList(context, expressions); // fast exit if there are no wildcards to evaluate - if (expressionList.hasWildcard() == false) { + if (context.getOptions().expandWildcardExpressions() == false) { + return expressions; + } + int firstWildcardIndex = 0; + for (; firstWildcardIndex < expressions.size(); firstWildcardIndex++) { + String expression = expressions.get(firstWildcardIndex); + if (isWildcard(expression)) { + break; + } + } + if (firstWildcardIndex == expressions.size()) { return expressions; } Set result = new HashSet<>(); - for (ExpressionList.Expression expression : expressionList) { - if (expression.isWildcard()) { - Stream matchingResources = matchResourcesToWildcard(context, expression.get()); + for (int i = 0; i < firstWildcardIndex; i++) { + result.add(expressions.get(i)); + } + AtomicBoolean emptyWildcardExpansion = context.getOptions().allowNoIndices() ? null : new AtomicBoolean(); + for (int i = firstWildcardIndex; i < expressions.size(); i++) { + String expression = expressions.get(i); + boolean isExclusion = i > firstWildcardIndex && expression.charAt(0) == '-'; + if (i == firstWildcardIndex || isWildcard(expression)) { + Stream matchingResources = matchResourcesToWildcard( + context, + isExclusion ? expression.substring(1) : expression + ); Stream matchingOpenClosedNames = expandToOpenClosed(context, matchingResources); - AtomicBoolean emptyWildcardExpansion = new AtomicBoolean(false); - if (context.getOptions().allowNoIndices() == false) { + if (emptyWildcardExpansion != null) { emptyWildcardExpansion.set(true); matchingOpenClosedNames = matchingOpenClosedNames.peek(x -> emptyWildcardExpansion.set(false)); } - if (expression.isExclusion()) { - matchingOpenClosedNames.forEachOrdered(result::remove); + if (isExclusion) { + matchingOpenClosedNames.forEach(result::remove); } else { - matchingOpenClosedNames.forEachOrdered(result::add); + matchingOpenClosedNames.forEach(result::add); } - if (emptyWildcardExpansion.get()) { - throw notFoundException(expression.get()); + if (emptyWildcardExpansion != null && emptyWildcardExpansion.get()) { + throw notFoundException(expression); } } else { - if (expression.isExclusion()) { - result.remove(expression.get()); + if (isExclusion) { + result.remove(expression.substring(1)); } else { - result.add(expression.get()); + result.add(expression); } } } @@ -1507,27 +1526,35 @@ private DateMathExpressionResolver() { // utility class } + /** + * Resolves date math expressions. If this is a noop the given {@code expressions} list is returned without copying. + * As a result callers of this method should not mutate the returned list. Mutating it may come with unexpected side effects. 
+ */ public static List resolve(Context context, List expressions) { - List result = new ArrayList<>(expressions.size()); - for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { - result.add(resolveExpression(expression, context::getStartTime)); + boolean wildcardSeen = false; + final boolean expandWildcards = context.getOptions().expandWildcardExpressions(); + String[] result = null; + for (int i = 0, n = expressions.size(); i < n; i++) { + String expression = expressions.get(i); + // accepts date-math exclusions that are of the form "-<...{}>", i.e. the "-" is outside the "<>" date-math template + boolean isExclusion = wildcardSeen && expression.startsWith("-"); + wildcardSeen = wildcardSeen || (expandWildcards && isWildcard(expression)); + String toResolve = isExclusion ? expression.substring(1) : expression; + String resolved = resolveExpression(toResolve, context::getStartTime); + if (toResolve != resolved) { + if (result == null) { + result = expressions.toArray(Strings.EMPTY_ARRAY); + } + result[i] = isExclusion ? "-" + resolved : resolved; + } } - return result; + return result == null ? expressions : Arrays.asList(result); } static String resolveExpression(String expression) { return resolveExpression(expression, System::currentTimeMillis); } - static String resolveExpression(ExpressionList.Expression expression, LongSupplier getTime) { - if (expression.isExclusion()) { - // accepts date-math exclusions that are of the form "-<...{}>", i.e. the "-" is outside the "<>" date-math template - return "-" + resolveExpression(expression.get(), getTime); - } else { - return resolveExpression(expression.get(), getTime); - } - } - static String resolveExpression(String expression, LongSupplier getTime) { if (expression.startsWith(EXPRESSION_LEFT_BOUND) == false || expression.endsWith(EXPRESSION_RIGHT_BOUND) == false) { return expression; @@ -1689,14 +1716,35 @@ private ExplicitResourceNameFilter() { */ public static List filterUnavailable(Context context, List expressions) { ensureRemoteIndicesRequireIgnoreUnavailable(context.getOptions(), expressions); - List result = new ArrayList<>(expressions.size()); - for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { - validateAliasOrIndex(expression); - if (expression.isWildcard() || expression.isExclusion() || ensureAliasOrIndexExists(context, expression.get())) { - result.add(expression.expression()); + final boolean expandWildcards = context.getOptions().expandWildcardExpressions(); + boolean wildcardSeen = false; + List result = null; + for (int i = 0; i < expressions.size(); i++) { + String expression = expressions.get(i); + if (Strings.isEmpty(expression)) { + throw notFoundException(expression); + } + // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API + // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, + // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown + // if the expression can't be found.
+ if (expression.charAt(0) == '_') { + throw new InvalidIndexNameException(expression, "must not start with '_'."); + } + final boolean isWildcard = expandWildcards && isWildcard(expression); + if (isWildcard || (wildcardSeen && expression.charAt(0) == '-') || ensureAliasOrIndexExists(context, expression)) { + if (result != null) { + result.add(expression); + } + } else { + if (result == null) { + result = new ArrayList<>(expressions.size() - 1); + result.addAll(expressions.subList(0, i)); + } } + wildcardSeen |= isWildcard; } - return result; + return result == null ? expressions : result; } /** @@ -1736,19 +1784,6 @@ private static boolean ensureAliasOrIndexExists(Context context, String name) { return true; } - private static void validateAliasOrIndex(ExpressionList.Expression expression) { - if (Strings.isEmpty(expression.expression())) { - throw notFoundException(expression.expression()); - } - // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API - // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, - // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown - // if the expression can't be found. - if (expression.expression().charAt(0) == '_') { - throw new InvalidIndexNameException(expression.expression(), "must not start with '_'."); - } - } - private static void ensureRemoteIndicesRequireIgnoreUnavailable(IndicesOptions options, List indexExpressions) { if (options.ignoreUnavailable()) { return; @@ -1773,57 +1808,6 @@ private static void failOnRemoteIndicesNotIgnoringUnavailable(List index } } - /** - * Used to iterate expression lists and work out which expression item is a wildcard or an exclusion. - */ - public static final class ExpressionList implements Iterable { - private final List expressionsList; - private final boolean hasWildcard; - - public record Expression(String expression, boolean isWildcard, boolean isExclusion) { - public String get() { - if (isExclusion()) { - // drop the leading "-" if exclusion because it is easier for callers to handle it like this - return expression().substring(1); - } else { - return expression(); - } - } - } - - /** - * Creates the expression iterable that can be used to easily check which expression item is a wildcard or an exclusion (or both). - * The {@param context} is used to check if wildcards ought to be considered or not. 
- */ - public ExpressionList(Context context, List expressionStrings) { - List expressionsList = new ArrayList<>(expressionStrings.size()); - boolean wildcardSeen = false; - for (String expressionString : expressionStrings) { - boolean isExclusion = expressionString.startsWith("-") && wildcardSeen; - if (context.getOptions().expandWildcardExpressions() && isWildcard(expressionString)) { - wildcardSeen = true; - expressionsList.add(new Expression(expressionString, true, isExclusion)); - } else { - expressionsList.add(new Expression(expressionString, false, isExclusion)); - } - } - this.expressionsList = expressionsList; - this.hasWildcard = wildcardSeen; - } - - /** - * Returns {@code true} if the expression contains any wildcard and the options allow wildcard expansion - */ - public boolean hasWildcard() { - return this.hasWildcard; - } - - @Override - public Iterator iterator() { - return expressionsList.iterator(); - } - } - /** * This is a context for the DateMathExpressionResolver which does not require {@code IndicesOptions} or {@code ClusterState} * since it uses only the start time to resolve expressions. diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java deleted file mode 100644 index 1ca59ff402bd8..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ExpressionList; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ExpressionList.Expression; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.test.ESTestCase; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.function.Supplier; - -import static org.hamcrest.Matchers.is; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class ExpressionListTests extends ESTestCase { - - public void testEmpty() { - ExpressionList expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), List.of()); - assertThat(expressionList.iterator().hasNext(), is(false)); - assertThat(expressionList.hasWildcard(), is(false)); - expressionList = new ExpressionList(getContextWithOptions(getNoExpandWildcardsIndicesOptions()), List.of()); - assertThat(expressionList.iterator().hasNext(), is(false)); - assertThat(expressionList.hasWildcard(), is(false)); - } - - public void testExplicitSingleNameExpression() { - for (IndicesOptions indicesOptions : List.of(getExpandWildcardsIndicesOptions(), getNoExpandWildcardsIndicesOptions())) { - for (String expressionString : List.of("non_wildcard", "-non_exclusion")) { - ExpressionList expressionList = new ExpressionList(getContextWithOptions(indicesOptions), List.of(expressionString)); - assertThat(expressionList.hasWildcard(), is(false)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(indicesOptions), List.of(expressionString)); - } - Iterator expressionIterator = expressionList.iterator(); - assertThat(expressionIterator.hasNext(), is(true)); - if (randomBoolean()) { - expressionIterator = expressionList.iterator(); - } - Expression expression = expressionIterator.next(); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.isWildcard(), is(false)); - assertThat(expression.get(), is(expressionString)); - assertThat(expressionIterator.hasNext(), is(false)); - } - } - } - - public void testWildcardSingleExpression() { - for (String wildcardTest : List.of("*", "a*", "*b", "a*b", "a-*b", "a*-b", "-*", "-a*", "-*b", "**", "*-*")) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - List.of(wildcardTest) - ); - assertThat(expressionList.hasWildcard(), is(true)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), List.of(wildcardTest)); - } - Iterator expressionIterator = expressionList.iterator(); - assertThat(expressionIterator.hasNext(), is(true)); - if (randomBoolean()) { - expressionIterator = expressionList.iterator(); - } - Expression expression = expressionIterator.next(); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.isWildcard(), is(true)); - assertThat(expression.get(), is(wildcardTest)); - assertThat(expressionIterator.hasNext(), is(false)); - } - } - - public void testWildcardLongerExpression() { - List onlyExplicits = randomList(7, () -> randomAlphaOfLengthBetween(0, 5)); - String wildcard = randomFrom("*", "*b", "-*", "*-", "c*", "a*b", "**"); - List expressionList = new ArrayList<>(onlyExplicits.size() + 1); - 
expressionList.addAll(randomSubsetOf(onlyExplicits)); - int wildcardPos = expressionList.size(); - expressionList.add(wildcard); - for (String item : onlyExplicits) { - if (expressionList.contains(item) == false) { - expressionList.add(item); - } - } - ExpressionList expressionIterable = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), expressionList); - assertThat(expressionIterable.hasWildcard(), is(true)); - if (randomBoolean()) { - expressionIterable = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), expressionList); - } - int i = 0; - for (Expression expression : expressionIterable) { - assertThat(expression.isExclusion(), is(false)); - if (i != wildcardPos) { - assertThat(expression.isWildcard(), is(false)); - } else { - assertThat(expression.isWildcard(), is(true)); - } - assertThat(expression.get(), is(expressionList.get(i++))); - } - } - - public void testWildcardsNoExclusionExpressions() { - for (List wildcardExpression : List.of( - List.of("*"), - List.of("a", "*"), - List.of("-b", "*c"), - List.of("-", "a", "c*"), - List.of("*", "a*", "*b"), - List.of("-*", "a", "b*") - )) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - wildcardExpression - ); - assertThat(expressionList.hasWildcard(), is(true)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), wildcardExpression); - } - int i = 0; - for (Expression expression : expressionList) { - assertThat(expression.isExclusion(), is(false)); - if (wildcardExpression.get(i).contains("*")) { - assertThat(expression.isWildcard(), is(true)); - } else { - assertThat(expression.isWildcard(), is(false)); - } - assertThat(expression.get(), is(wildcardExpression.get(i++))); - } - } - } - - public void testWildcardExpressionNoExpandOptions() { - for (List wildcardExpression : List.of( - List.of("*"), - List.of("a", "*"), - List.of("-b", "*c"), - List.of("*d", "-"), - List.of("*", "-*"), - List.of("-", "a", "c*"), - List.of("*", "a*", "*b") - )) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getNoExpandWildcardsIndicesOptions()), - wildcardExpression - ); - assertThat(expressionList.hasWildcard(), is(false)); - if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(getNoExpandWildcardsIndicesOptions()), wildcardExpression); - } - int i = 0; - for (Expression expression : expressionList) { - assertThat(expression.isWildcard(), is(false)); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(wildcardExpression.get(i++))); - } - } - } - - public void testSingleExclusionExpression() { - String wildcard = randomFrom("*", "*b", "-*", "*-", "c*", "a*b", "**", "*-*"); - int wildcardPos = randomIntBetween(0, 3); - String exclusion = randomFrom("-*", "-", "-c*", "-ab", "--"); - int exclusionPos = randomIntBetween(wildcardPos + 1, 7); - List exclusionExpression = new ArrayList<>(); - for (int i = 0; i < wildcardPos; i++) { - exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); - } - exclusionExpression.add(wildcard); - for (int i = wildcardPos + 1; i < exclusionPos; i++) { - exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); - } - exclusionExpression.add(exclusion); - for (int i = 0; i < randomIntBetween(0, 3); i++) { - exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); - } - ExpressionList expressionList = new 
ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), exclusionExpression); - if (randomBoolean()) { - assertThat(expressionList.hasWildcard(), is(true)); - } - int i = 0; - for (Expression expression : expressionList) { - if (i == wildcardPos) { - assertThat(expression.isWildcard(), is(true)); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(exclusionExpression.get(i++))); - } else if (i == exclusionPos) { - assertThat(expression.isExclusion(), is(true)); - assertThat(expression.isWildcard(), is(exclusionExpression.get(i).contains("*"))); - assertThat(expression.get(), is(exclusionExpression.get(i++).substring(1))); - } else { - assertThat(expression.isWildcard(), is(false)); - assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(exclusionExpression.get(i++))); - } - } - } - - public void testExclusionsExpression() { - for (Tuple, List> exclusionExpression : List.of( - new Tuple<>(List.of("-a", "*", "-a"), List.of(false, false, true)), - new Tuple<>(List.of("-b*", "c", "-a"), List.of(false, false, true)), - new Tuple<>(List.of("*d", "-", "*b"), List.of(false, true, false)), - new Tuple<>(List.of("-", "--", "-*", "", "-*"), List.of(false, false, false, false, true)), - new Tuple<>(List.of("*-", "-*", "a", "-b"), List.of(false, true, false, true)), - new Tuple<>(List.of("a", "-b", "-*", "-b", "*", "-b"), List.of(false, false, false, true, false, true)), - new Tuple<>(List.of("-a", "*d", "-a", "-*b", "-b", "--"), List.of(false, false, true, true, true, true)) - )) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - exclusionExpression.v1() - ); - if (randomBoolean()) { - assertThat(expressionList.hasWildcard(), is(true)); - } - int i = 0; - for (Expression expression : expressionList) { - boolean isExclusion = exclusionExpression.v2().get(i); - assertThat(expression.isExclusion(), is(isExclusion)); - assertThat(expression.isWildcard(), is(exclusionExpression.v1().get(i).contains("*"))); - if (isExclusion) { - assertThat(expression.get(), is(exclusionExpression.v1().get(i++).substring(1))); - } else { - assertThat(expression.get(), is(exclusionExpression.v1().get(i++))); - } - } - } - } - - private IndicesOptions getExpandWildcardsToOpenOnlyIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - true, - false, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private IndicesOptions getExpandWildcardsToCloseOnlyIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - false, - true, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private IndicesOptions getExpandWildcardsToOpenCloseIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - true, - true, - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private IndicesOptions getExpandWildcardsIndicesOptions() { - return ESTestCase.>randomFrom( - this::getExpandWildcardsToOpenOnlyIndicesOptions, - this::getExpandWildcardsToCloseOnlyIndicesOptions, - this::getExpandWildcardsToOpenCloseIndicesOptions - ).get(); - } - - private IndicesOptions getNoExpandWildcardsIndicesOptions() { - return IndicesOptions.fromOptions( - randomBoolean(), - randomBoolean(), - false, - false, - randomBoolean(), - randomBoolean(), - 
randomBoolean(), - randomBoolean(), - randomBoolean() - ); - } - - private Context getContextWithOptions(IndicesOptions indicesOptions) { - Context context = mock(Context.class); - when(context.getOptions()).thenReturn(indicesOptions); - return context; - } -} From b2ab9df1a9ff71442ad8d695ec15fcf8b72e133d Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 24 Oct 2024 22:33:56 +0100 Subject: [PATCH 39/60] [ML] Fix timeout attaching to missing deployment (#115517) Fixes a timeout in the Inference API where the listener was never called when attaching to a deployment that does not exist. --- .../xpack/inference/CreateFromDeploymentIT.java | 8 ++++++++ .../ElasticsearchInternalService.java | 14 +++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java index 0bfb6e9e43b03..273b16d295a3d 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java @@ -109,6 +109,14 @@ public void testModelIdDoesNotMatch() throws IOException { ); } + public void testDeploymentDoesNotExist() { + var deploymentId = "missing_deployment"; + + var inferenceId = "inference_on_missing_deployment"; + var e = expectThrows(ResponseException.class, () -> putModel(inferenceId, endpointConfig(deploymentId), TaskType.SPARSE_EMBEDDING)); + assertThat(e.getMessage(), containsString("Cannot find deployment [missing_deployment]")); + } + public void testNumAllocationsIsUpdated() throws IOException { var modelId = "update_num_allocations"; var deploymentId = modelId; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index a0235f74ce511..fec690199d97d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -34,7 +34,6 @@ import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; import org.elasticsearch.xpack.core.ml.action.GetDeploymentStatsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; -import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction; import org.elasticsearch.xpack.core.ml.action.InferModelAction; import org.elasticsearch.xpack.core.ml.inference.assignment.AdaptiveAllocationsSettings; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentStats; @@ -913,7 +912,7 @@ private void validateAgainstDeployment( listener.onFailure( new ElasticsearchStatusException( "Deployment [{}] uses model [{}] which does not match the model [{}] in the request.", - RestStatus.BAD_REQUEST, // TODO better message + RestStatus.BAD_REQUEST, deploymentId, response.get().getModelId(), modelId ) ); @@ -933,21 +932,22 @@ 
checkTaskTypeForMlNodeModel(response.get().getModelId(), taskType, l.delegateFailureAndWrap((l2, compatibleTaskType) -> { l2.onResponse(updatedSettings); })); + } else { + listener.onFailure(new ElasticsearchStatusException("Cannot find deployment [{}]", RestStatus.NOT_FOUND, deploymentId)); } })); } private void getDeployment(String deploymentId, ActionListener> listener) { client.execute( - GetTrainedModelsStatsAction.INSTANCE, - new GetTrainedModelsStatsAction.Request(deploymentId), + GetDeploymentStatsAction.INSTANCE, + new GetDeploymentStatsAction.Request(deploymentId), listener.delegateFailureAndWrap((l, response) -> { l.onResponse( - response.getResources() + response.getStats() .results() .stream() - .filter(s -> s.getDeploymentStats() != null && s.getDeploymentStats().getDeploymentId().equals(deploymentId)) - .map(GetTrainedModelsStatsAction.Response.TrainedModelStats::getDeploymentStats) + .filter(s -> s.getDeploymentId() != null && s.getDeploymentId().equals(deploymentId)) .findFirst() ); }) From c556a293c384b92a9ef71ec37bd49fb143300236 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 08:50:44 +1100 Subject: [PATCH 40/60] Mute org.elasticsearch.test.rest.ClientYamlTestSuiteIT test {yaml=indices.create/10_basic/Create lookup index} #115605 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 4af02859d88d4..084bf27d6a11b 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -276,6 +276,9 @@ tests: - class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT method: test {yaml=cluster.stats/30_ccs_stats/cross-cluster search stats search} issue: https://github.com/elastic/elasticsearch/issues/115600 +- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT + method: test {yaml=indices.create/10_basic/Create lookup index} + issue: https://github.com/elastic/elasticsearch/issues/115605 # Examples: # From 5714b989fabcf944fb719f31200661789e0824f2 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 24 Oct 2024 16:58:41 -0700 Subject: [PATCH 41/60] Do not run lookup index YAML with two shards (#115608) We can randomly inject a global template that defaults to 2 shards instead of 1. This causes the lookup index YAML tests to fail. 
To avoid this, the change requires specifying the default_shards setting for these tests --- .../resources/rest-api-spec/test/indices.create/10_basic.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml index 31d127b80c844..d0e1759073e1b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -153,7 +153,7 @@ --- "Create lookup index": - requires: - test_runner_features: [ capabilities ] + test_runner_features: [ capabilities, default_shards ] capabilities: - method: PUT path: /{index} @@ -176,7 +176,7 @@ --- "Create lookup index with one shard": - requires: - test_runner_features: [ capabilities ] + test_runner_features: [ capabilities, default_shards ] capabilities: - method: PUT path: /{index} From bbd887a66a1330188047825799dc8368dbd56ba8 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Fri, 25 Oct 2024 07:45:40 +0200 Subject: [PATCH 42/60] Identify system threads using a Thread subclass (#113562) --- .../common/util/concurrent/EsExecutors.java | 35 ++++++++++--- .../DefaultBuiltInExecutorBuilders.java | 12 +++-- .../threadpool/ExecutorBuilder.java | 7 ++- .../threadpool/FixedExecutorBuilder.java | 49 +++++++++++++++++-- .../threadpool/ScalingExecutorBuilder.java | 4 +- .../util/concurrent/EsExecutorsTests.java | 8 ++- 6 files changed, 98 insertions(+), 17 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index b10db7d4d1dd3..9120576815bac 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -326,16 +326,25 @@ public static String executorName(Thread thread) { } public static ThreadFactory daemonThreadFactory(Settings settings, String namePrefix) { - return daemonThreadFactory(threadName(settings, namePrefix)); + return createDaemonThreadFactory(threadName(settings, namePrefix), false); } public static ThreadFactory daemonThreadFactory(String nodeName, String namePrefix) { + return daemonThreadFactory(nodeName, namePrefix, false); + } + + public static ThreadFactory daemonThreadFactory(String nodeName, String namePrefix, boolean isSystemThread) { assert nodeName != null && false == nodeName.isEmpty(); - return daemonThreadFactory(threadName(nodeName, namePrefix)); + return createDaemonThreadFactory(threadName(nodeName, namePrefix), isSystemThread); } - public static ThreadFactory daemonThreadFactory(String namePrefix) { - return new EsThreadFactory(namePrefix); + public static ThreadFactory daemonThreadFactory(String name) { + assert name != null && name.isEmpty() == false; + return createDaemonThreadFactory(name, false); + } + + private static ThreadFactory createDaemonThreadFactory(String namePrefix, boolean isSystemThread) { + return new EsThreadFactory(namePrefix, isSystemThread); } static class EsThreadFactory implements ThreadFactory { @@ -343,22 +352,36 @@ static class EsThreadFactory implements ThreadFactory { final ThreadGroup group; final AtomicInteger threadNumber = new AtomicInteger(1); final String namePrefix; + final boolean 
isSystem; - EsThreadFactory(String namePrefix) { + EsThreadFactory(String namePrefix, boolean isSystem) { this.namePrefix = namePrefix; SecurityManager s = System.getSecurityManager(); group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup(); + this.isSystem = isSystem; } @Override public Thread newThread(Runnable r) { return AccessController.doPrivileged((PrivilegedAction) () -> { - Thread t = new Thread(group, r, namePrefix + "[T#" + threadNumber.getAndIncrement() + "]", 0); + Thread t = new EsThread(group, r, namePrefix + "[T#" + threadNumber.getAndIncrement() + "]", 0, isSystem); t.setDaemon(true); return t; }); } + } + public static class EsThread extends Thread { + private final boolean isSystem; + + EsThread(ThreadGroup group, Runnable target, String name, long stackSize, boolean isSystem) { + super(group, target, name, stackSize); + this.isSystem = isSystem; + } + + public boolean isSystem() { + return isSystem; + } } /** diff --git a/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java b/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java index c3a24d012c013..a97d22a976631 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java +++ b/server/src/main/java/org/elasticsearch/threadpool/DefaultBuiltInExecutorBuilders.java @@ -170,7 +170,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_READ, halfProcMaxAt5, 2000, - EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK, + true ) ); result.put( @@ -180,7 +181,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_WRITE, halfProcMaxAt5, 1000, - new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA) + new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA), + true ) ); result.put( @@ -190,7 +192,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_CRITICAL_READ, halfProcMaxAt5, 2000, - EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK, + true ) ); result.put( @@ -200,7 +203,8 @@ public Map getBuilders(Settings settings, int allocated ThreadPool.Names.SYSTEM_CRITICAL_WRITE, halfProcMaxAt5, 1500, - new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA) + new EsExecutors.TaskTrackingConfig(true, indexAutoscalingEWMA), + true ) ); return unmodifiableMap(result); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java index 2337d51d07571..c259feb1c978e 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ExecutorBuilder.java @@ -24,9 +24,11 @@ public abstract class ExecutorBuilder { private final String name; + private final boolean isSystemThread; - public ExecutorBuilder(String name) { + public ExecutorBuilder(String name, boolean isSystemThread) { this.name = name; + this.isSystemThread = isSystemThread; } protected String name() { @@ -90,4 +92,7 @@ abstract static class ExecutorSettings { } + public boolean isSystemThread() { + return isSystemThread; + } } diff --git a/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java index 07db563da39a1..9c723f241f1d0 100644 --- 
a/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/FixedExecutorBuilder.java @@ -51,7 +51,28 @@ public final class FixedExecutorBuilder extends ExecutorBuilder( sizeKey, @@ -102,7 +145,7 @@ FixedExecutorSettings getSettings(Settings settings) { ThreadPool.ExecutorHolder build(final FixedExecutorSettings settings, final ThreadContext threadContext) { int size = settings.size; int queueSize = settings.queueSize; - final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name())); + final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings.nodeName, name(), isSystemThread()); final ExecutorService executor = EsExecutors.newFixed( settings.nodeName + "/" + name(), size, diff --git a/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java b/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java index a31f940cdb2dc..1017d41a77444 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ScalingExecutorBuilder.java @@ -104,7 +104,7 @@ public ScalingExecutorBuilder( final String prefix, final EsExecutors.TaskTrackingConfig trackingConfig ) { - super(name); + super(name, false); this.coreSetting = Setting.intSetting(settingsKey(prefix, "core"), core, Setting.Property.NodeScope); this.maxSetting = Setting.intSetting(settingsKey(prefix, "max"), max, Setting.Property.NodeScope); this.keepAliveSetting = Setting.timeSetting(settingsKey(prefix, "keep_alive"), keepAlive, Setting.Property.NodeScope); @@ -131,7 +131,7 @@ ThreadPool.ExecutorHolder build(final ScalingExecutorSettings settings, final Th int core = settings.core; int max = settings.max; final ThreadPool.Info info = new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.SCALING, core, max, keepAlive, null); - final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name())); + final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings.nodeName, name()); ExecutorService executor; executor = EsExecutors.newScaling( settings.nodeName + "/" + name(), diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index bdfec9dfaa630..2867c9e007937 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -635,15 +635,19 @@ public void testParseExecutorName() throws InterruptedException { final var executorName = randomAlphaOfLength(10); final String nodeName = rarely() ? 
null : randomIdentifier(); final ThreadFactory threadFactory; + final boolean isSystem; if (nodeName == null) { + isSystem = false; threadFactory = EsExecutors.daemonThreadFactory(Settings.EMPTY, executorName); } else if (randomBoolean()) { + isSystem = false; threadFactory = EsExecutors.daemonThreadFactory( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), nodeName).build(), executorName ); } else { - threadFactory = EsExecutors.daemonThreadFactory(nodeName, executorName); + isSystem = randomBoolean(); + threadFactory = EsExecutors.daemonThreadFactory(nodeName, executorName, isSystem); } final var thread = threadFactory.newThread(() -> {}); @@ -652,6 +656,8 @@ public void testParseExecutorName() throws InterruptedException { assertThat(EsExecutors.executorName(thread), equalTo(executorName)); assertThat(EsExecutors.executorName("TEST-" + thread.getName()), is(nullValue())); assertThat(EsExecutors.executorName("LuceneTestCase" + thread.getName()), is(nullValue())); + assertThat(EsExecutors.executorName("LuceneTestCase" + thread.getName()), is(nullValue())); + assertThat(((EsExecutors.EsThread) thread).isSystem(), equalTo(isSystem)); } finally { thread.join(); } From 7f573c6c28fb42e89d8bb76d6764dc681c239e06 Mon Sep 17 00:00:00 2001 From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com> Date: Fri, 25 Oct 2024 08:50:05 +0200 Subject: [PATCH 43/60] Only aggregations require at least one shard request (#115314) * unskipping shards only when aggs * Update docs/changelog/115314.yaml * fixed more tests * null check for searchRequest.source() --- docs/changelog/115314.yaml | 5 +++ .../datastreams/TSDBIndexingIT.java | 2 +- .../org/elasticsearch/search/CCSDuelIT.java | 4 ++- .../test/multi_cluster/70_skip_shards.yml | 12 +++---- .../multi_cluster/90_index_name_query.yml | 4 +-- .../search/ccs/CrossClusterSearchIT.java | 4 +-- .../search/profile/query/QueryProfilerIT.java | 6 +++- .../search/stats/FieldUsageStatsIT.java | 12 ++++--- .../action/search/TransportSearchAction.java | 4 ++- .../search/CrossClusterAsyncSearchIT.java | 32 +++++++++++++------ .../mapper/SearchIdleTests.java | 10 ++---- .../rrf/RRFRankCoordinatorCanMatchIT.java | 5 +-- .../rank/rrf/RRFRankShardCanMatchIT.java | 5 +-- ...pshotsCanMatchOnCoordinatorIntegTests.java | 12 +++---- .../checkpoint/TransformCCSCanMatchIT.java | 6 ++-- .../oldrepos/OldRepositoryAccessIT.java | 3 +- 16 files changed, 70 insertions(+), 56 deletions(-) create mode 100644 docs/changelog/115314.yaml diff --git a/docs/changelog/115314.yaml b/docs/changelog/115314.yaml new file mode 100644 index 0000000000000..76ac12d58fcf3 --- /dev/null +++ b/docs/changelog/115314.yaml @@ -0,0 +1,5 @@ +pr: 115314 +summary: Only aggregations require at least one shard request +area: Search +type: enhancement +issues: [] diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index 29ec326548f2b..aad68660d2e4d 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -412,7 +412,7 @@ public void testSkippingShards() throws Exception { assertResponse(client().search(searchRequest), searchResponse -> { ElasticsearchAssertions.assertNoSearchHits(searchResponse); assertThat(searchResponse.getTotalShards(), equalTo(2)); - 
assertThat(searchResponse.getSkippedShards(), equalTo(1)); + assertThat(searchResponse.getSkippedShards(), equalTo(2)); assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); }); } diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 5dde1d664402f..79cdc1047aec9 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -43,6 +43,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -580,13 +581,14 @@ public void testSortByField() throws Exception { public void testSortByFieldOneClusterHasNoResults() throws Exception { assumeMultiClusterSetup(); - // set to a value greater than the number of shards to avoid differences due to the skipping of shards + // setting aggs to avoid differences due to the skipping of shards when matching none SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); boolean onlyRemote = randomBoolean(); sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? REMOTE_INDEX_NAME : INDEX_NAME)); sourceBuilder.sort("type.keyword", SortOrder.ASC); sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort("user.keyword", SortOrder.ASC); + sourceBuilder.aggregation(AggregationBuilders.max("max").field("creationDate")); CheckedConsumer responseChecker = response -> { assertHits(response); int size = response.evaluateArraySize("hits.hits"); diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml index 92ae11c712b25..f392ae6d09413 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml @@ -166,8 +166,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a terms query @@ -183,8 +182,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a prefix query @@ -200,8 +198,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a wildcard query @@ -217,7 
+214,6 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml index a60a1b0d812ee..be2ce033b123c 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml @@ -81,7 +81,7 @@ teardown: - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } - do: @@ -98,5 +98,5 @@ teardown: - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 5984e1acc89af..63eece88a53fc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -214,7 +214,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // with DFS_QUERY_THEN_FETCH, the local shards are never skipped assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -224,7 +224,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (clusters.isCcsMinimizeRoundtrips()) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index e6cd89c09b979..0c1012c520dac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -68,7 +68,11 @@ public void testProfileQuery() throws Exception { prepareSearch().setQuery(q).setTrackTotalHits(true).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { assertNotNull("Profile response element should not be null", response.getProfileResults()); - assertThat("Profile response should 
not be an empty array", response.getProfileResults().size(), not(0)); + if (response.getSkippedShards() == response.getSuccessfulShards()) { + assertEquals(0, response.getProfileResults().size()); + } else { + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); + } for (Map.Entry shard : response.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java index 140afd6b269b3..3d5120226ebed 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java @@ -158,11 +158,15 @@ public void testFieldUsageStats() throws ExecutionException, InterruptedExceptio assertTrue(stats.hasField("date_field")); assertEquals(Set.of(UsageContext.POINTS), stats.get("date_field").keySet()); - // can_match does not enter search stats - // there is a special case though where we have no hit but we need to get at least one search response in order - // to produce a valid search result with all the aggs etc., so we hit one of the two shards + + long expectedShards = 2L * numShards; + if (numShards == 1) { + // with 1 shard and setPreFilterShardSize(1) we don't perform can_match phase but instead directly query the shard + expectedShards += 1; + } + assertEquals( - (2 * numShards) + 1, + expectedShards, indicesAdmin().prepareStats("test") .clear() .setSearch(true) diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 302c3e243a1f6..8f718972c2eaa 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -1458,6 +1458,8 @@ public SearchPhase newSearchPhase( SearchResponse.Clusters clusters ) { if (preFilter) { + // only for aggs we need to contact shards even if there are no matches + boolean requireAtLeastOneMatch = searchRequest.source() != null && searchRequest.source().aggregations() != null; return new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -1469,7 +1471,7 @@ public SearchPhase newSearchPhase( shardIterators, timeProvider, task, - true, + requireAtLeastOneMatch, searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), listener.delegateFailureAndWrap( (l, iters) -> newSearchPhase( diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 9d83f88a043e2..3cd8778069d0c 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -274,6 +274,8 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except boolean dfs = randomBoolean(); if (dfs) { 
request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } else { + request.getSearchRequest().searchType(SearchType.QUERY_THEN_FETCH); } RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp").from(100).to(2000); request.getSearchRequest().source(new SearchSourceBuilder().query(rangeQueryBuilder).size(10)); @@ -288,20 +290,30 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertTrue(response.isRunning()); SearchResponse.Clusters clusters = response.getSearchResponse().getClusters(); assertThat(clusters.getTotal(), equalTo(2)); - assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults()); - + if (dfs) { + assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults()); + } else { + assertFalse( + "search cluster results should not be marked as partial as all shards are skipped", + clusters.hasPartialResults() + ); + } SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); + if (dfs) { + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); + } else { + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + } SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); assertNotNull(remoteClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); } finally { response.decRef(); } - - SearchListenerPlugin.waitSearchStarted(); + if (dfs) { + SearchListenerPlugin.waitSearchStarted(); + } SearchListenerPlugin.allowQueryPhase(); waitForSearchTasksToFinish(); @@ -331,7 +343,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // no skipped shards locally when DFS_QUERY_THEN_FETCH is used assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -341,7 +353,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (minimizeRoundtrips) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } @@ -377,7 +389,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // no skipped shards locally when DFS_QUERY_THEN_FETCH is used assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -387,7 +399,7 @@ public 
void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (minimizeRoundtrips) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java index 2da4e2802bdbe..9eb792428537b 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java @@ -42,7 +42,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; public class SearchIdleTests extends ESSingleNodeTestCase { @@ -133,8 +132,7 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept // WHEN assertResponse(search("test*", "constant_keyword", randomAlphaOfLength(5), 5), searchResponse -> { assertEquals(RestStatus.OK, searchResponse.status()); - // NOTE: we need an empty result from at least one shard - assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, searchResponse.getSkippedShards()); + assertEquals(idleIndexShardsCount + activeIndexShardsCount, searchResponse.getSkippedShards()); assertEquals(0, searchResponse.getFailedShards()); assertEquals(0, searchResponse.getHits().getHits().length); }); @@ -144,12 +142,8 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept assertIdleShardsRefreshStats(beforeStatsResponse, afterStatsResponse); - // If no shards match the can match phase then at least one shard gets queries for an empty response. - // However, this affects the search idle stats. 
List active = Arrays.stream(afterStatsResponse.getShards()).filter(s -> s.isSearchIdle() == false).toList(); - assertThat(active, hasSize(1)); - assertThat(active.get(0).getShardRouting().getIndexName(), equalTo("test1")); - assertThat(active.get(0).getShardRouting().id(), equalTo(0)); + assertThat(active, hasSize(0)); } public void testSearchIdleConstantKeywordMatchOneIndex() throws InterruptedException { diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java index 445aeaa375e11..467668f008b04 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexSettings; @@ -206,10 +207,10 @@ public void testCanMatchCoordinator() throws Exception { ) .setSize(5), response -> { - assertNull(response.getHits().getTotalHits()); + assertEquals(new TotalHits(0, TotalHits.Relation.EQUAL_TO), response.getHits().getTotalHits()); assertEquals(0, response.getHits().getHits().length); assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + assertEquals(5, response.getSkippedShards()); } ); diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java index 084ccc88bee33..09fe8d1b7ad6e 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.rank.rrf; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.search.SearchType; @@ -199,10 +200,10 @@ public void testCanMatchShard() throws IOException { ) .setSize(5), response -> { - assertNull(response.getHits().getTotalHits()); + assertEquals(new TotalHits(0, TotalHits.Relation.EQUAL_TO), response.getHits().getTotalHits()); assertEquals(0, response.getHits().getHits().length); assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + assertEquals(5, response.getSkippedShards()); } ); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index ed42d86bc8c49..259d38b1fe8ee 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -384,11 +384,9 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } } else { assertResponse(client().search(request), newSearchResponse -> { - // When all shards are skipped, at least one of them should be queried in order to - // provide a proper search response. - assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); - assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); - assertThat(newSearchResponse.getFailedShards(), equalTo(1)); + assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(newSearchResponse.getFailedShards(), equalTo(0)); assertThat(newSearchResponse.getTotalShards(), equalTo(indexOutsideSearchRangeShardCount)); }); @@ -748,9 +746,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() // All the regular index searches succeeded assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards)); assertThat(newSearchResponse.getFailedShards(), equalTo(0)); - // We have to query at least one node to construct a valid response, and we pick - // a shard that's available in order to construct the search response - assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards - 1)); + assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards)); assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards)); assertThat(newSearchResponse.getHits().getTotalHits().value(), equalTo(0L)); }); diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java index a7f7b5bd3edda..208da4177fd4c 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java @@ -197,15 +197,13 @@ public void testSearchAction_RangeQueryThatMatchesNoShards() throws ExecutionExc QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents true, 0, - // All but 2 shards are skipped. TBH I don't know why this 2 shards are not skipped - oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards - 2 + oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards ); testSearchAction( QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents false, 0, - // All but 1 shards are skipped. 
TBH I don't know why this 1 shard is not skipped - oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards - 1 + oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards ); } diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java index f502683e42eb2..30ec6630b9618 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java @@ -484,8 +484,7 @@ private void assertDocs( logger.info(searchResponse); assertEquals(0, searchResponse.getHits().getTotalHits().value()); assertEquals(numberOfShards, searchResponse.getSuccessfulShards()); - // When all shards are skipped, at least one of them is queried in order to provide a proper search response. - assertEquals(numberOfShards - 1, searchResponse.getSkippedShards()); + assertEquals(numberOfShards, searchResponse.getSkippedShards()); } finally { searchResponse.decRef(); } From a0c1df0d0c4ecdb39d05186f96c4ae976fde4f3e Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 08:51:00 +0200 Subject: [PATCH 44/60] Speedup Query Phase Merging (#113355) Reducing contention and context switching in merging for the query phase by avoiding respinning the merge task repeatedly, removing things that don't need synchronization from the synchronized blocks and merging repeated loops over the same query result arrays. --- .../search/QueryPhaseResultConsumer.java | 395 +++++++++--------- .../action/search/SearchPhaseController.java | 45 +- 2 files changed, 218 insertions(+), 222 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index 89411ac302b10..6c654d9235ec2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; @@ -121,26 +120,50 @@ public void consumeResult(SearchPhaseResult result, Runnable next) { public SearchPhaseController.ReducedQueryPhase reduce() throws Exception { if (pendingMerges.hasPendingMerges()) { throw new AssertionError("partial reduce in-flight"); - } else if (pendingMerges.hasFailure()) { - throw pendingMerges.getFailure(); + } + Exception failure = pendingMerges.failure.get(); + if (failure != null) { + throw failure; } // ensure consistent ordering pendingMerges.sortBuffer(); - final TopDocsStats topDocsStats = pendingMerges.consumeTopDocsStats(); - final List<TopDocs> topDocsList = pendingMerges.consumeTopDocs(); + final TopDocsStats topDocsStats = pendingMerges.topDocsStats; + final int resultSize = pendingMerges.buffer.size() + (pendingMerges.mergeResult == null ? 0 : 1); + final List<TopDocs> topDocsList = hasTopDocs ? new ArrayList<>(resultSize) : null; + final List<DelayableWriteable<InternalAggregations>> aggsList = hasAggs ? 
new ArrayList<>(resultSize) : null; + synchronized (pendingMerges) { + if (pendingMerges.mergeResult != null) { + if (topDocsList != null) { + topDocsList.add(pendingMerges.mergeResult.reducedTopDocs); + } + if (aggsList != null) { + aggsList.add(DelayableWriteable.referencing(pendingMerges.mergeResult.reducedAggs)); + } + } + for (QuerySearchResult result : pendingMerges.buffer) { + topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); + if (topDocsList != null) { + TopDocsAndMaxScore topDocs = result.consumeTopDocs(); + setShardIndex(topDocs.topDocs, result.getShardIndex()); + topDocsList.add(topDocs.topDocs); + } + if (aggsList != null) { + aggsList.add(result.getAggs()); + } + } + } SearchPhaseController.ReducedQueryPhase reducePhase; long breakerSize = pendingMerges.circuitBreakerBytes; try { - final List> aggsList = pendingMerges.getAggs(); - if (hasAggs) { + if (aggsList != null) { // Add an estimate of the final reduce size breakerSize = pendingMerges.addEstimateAndMaybeBreak(PendingMerges.estimateRamBytesUsedForReduce(breakerSize)); } reducePhase = SearchPhaseController.reducedQueryPhase( results.asList(), aggsList, - topDocsList, + topDocsList == null ? Collections.emptyList() : topDocsList, topDocsStats, pendingMerges.numReducePhases, false, @@ -183,65 +206,59 @@ private MergeResult partialReduce( // ensure consistent ordering Arrays.sort(toConsume, RESULT_COMPARATOR); - for (QuerySearchResult result : toConsume) { - topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); - } - + final List processedShards = new ArrayList<>(emptyResults); final TopDocs newTopDocs; + final InternalAggregations newAggs; + final List> aggsList; + final int resultSetSize = toConsume.length + (lastMerge != null ? 
1 : 0); + if (hasAggs) { + aggsList = new ArrayList<>(resultSetSize); + if (lastMerge != null) { + aggsList.add(DelayableWriteable.referencing(lastMerge.reducedAggs)); + } + } else { + aggsList = null; + } + List topDocsList; if (hasTopDocs) { - List topDocsList = new ArrayList<>(); + topDocsList = new ArrayList<>(resultSetSize); if (lastMerge != null) { topDocsList.add(lastMerge.reducedTopDocs); } - for (QuerySearchResult result : toConsume) { - TopDocsAndMaxScore topDocs = result.consumeTopDocs(); - setShardIndex(topDocs.topDocs, result.getShardIndex()); - topDocsList.add(topDocs.topDocs); - } - newTopDocs = mergeTopDocs( - topDocsList, - // we have to merge here in the same way we collect on a shard - topNSize, - 0 - ); } else { - newTopDocs = null; + topDocsList = null; } - - final InternalAggregations newAggs; - if (hasAggs) { - try { - final List> aggsList = new ArrayList<>(); - if (lastMerge != null) { - aggsList.add(DelayableWriteable.referencing(lastMerge.reducedAggs)); - } - for (QuerySearchResult result : toConsume) { + try { + for (QuerySearchResult result : toConsume) { + topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); + SearchShardTarget target = result.getSearchShardTarget(); + processedShards.add(new SearchShard(target.getClusterAlias(), target.getShardId())); + if (aggsList != null) { aggsList.add(result.getAggs()); } - newAggs = InternalAggregations.topLevelReduceDelayable(aggsList, aggReduceContextBuilder.forPartialReduction()); - } finally { - for (QuerySearchResult result : toConsume) { - result.releaseAggs(); + if (topDocsList != null) { + TopDocsAndMaxScore topDocs = result.consumeTopDocs(); + setShardIndex(topDocs.topDocs, result.getShardIndex()); + topDocsList.add(topDocs.topDocs); } } - } else { - newAggs = null; + // we have to merge here in the same way we collect on a shard + newTopDocs = topDocsList == null ? null : mergeTopDocs(topDocsList, topNSize, 0); + newAggs = aggsList == null + ? null + : InternalAggregations.topLevelReduceDelayable(aggsList, aggReduceContextBuilder.forPartialReduction()); + } finally { + releaseAggs(toConsume); } - List processedShards = new ArrayList<>(emptyResults); if (lastMerge != null) { processedShards.addAll(lastMerge.processedShards); } - for (QuerySearchResult result : toConsume) { - SearchShardTarget target = result.getSearchShardTarget(); - processedShards.add(new SearchShard(target.getClusterAlias(), target.getShardId())); - } if (progressListener != SearchProgressListener.NOOP) { progressListener.notifyPartialReduce(processedShards, topDocsStats.getTotalHits(), newAggs, numReducePhases); } // we leave the results un-serialized because serializing is slow but we compute the serialized // size as an estimate of the memory used by the newly reduced aggregations. - long serializedSize = hasAggs ? DelayableWriteable.getSerializedSize(newAggs) : 0; - return new MergeResult(processedShards, newTopDocs, newAggs, hasAggs ? serializedSize : 0); + return new MergeResult(processedShards, newTopDocs, newAggs, newAggs != null ? 
DelayableWriteable.getSerializedSize(newAggs) : 0); } public int getNumReducePhases() { @@ -274,11 +291,7 @@ private class PendingMerges implements Releasable { @Override public synchronized void close() { - if (hasFailure()) { - assert circuitBreakerBytes == 0; - } else { - assert circuitBreakerBytes >= 0; - } + assert assertFailureAndBreakerConsistent(); releaseBuffer(); circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); @@ -290,8 +303,14 @@ public synchronized void close() { } } - synchronized Exception getFailure() { - return failure.get(); + private boolean assertFailureAndBreakerConsistent() { + boolean hasFailure = failure.get() != null; + if (hasFailure) { + assert circuitBreakerBytes == 0; + } else { + assert circuitBreakerBytes >= 0; + } + return true; } boolean hasFailure() { @@ -342,56 +361,71 @@ static long estimateRamBytesUsedForReduce(long size) { } public void consume(QuerySearchResult result, Runnable next) { - boolean executeNextImmediately = true; - synchronized (this) { - if (hasFailure() || result.isNull()) { - result.consumeAll(); - if (result.isNull()) { - SearchShardTarget target = result.getSearchShardTarget(); - emptyResults.add(new SearchShard(target.getClusterAlias(), target.getShardId())); - } - } else { - if (hasAggs) { - long aggsSize = ramBytesUsedQueryResult(result); - try { - addEstimateAndMaybeBreak(aggsSize); - } catch (Exception exc) { - result.releaseAggs(); - releaseBuffer(); - onMergeFailure(exc); - next.run(); - return; + if (hasFailure()) { + result.consumeAll(); + next.run(); + } else if (result.isNull()) { + result.consumeAll(); + SearchShardTarget target = result.getSearchShardTarget(); + SearchShard searchShard = new SearchShard(target.getClusterAlias(), target.getShardId()); + synchronized (this) { + emptyResults.add(searchShard); + } + next.run(); + } else { + final long aggsSize = ramBytesUsedQueryResult(result); + boolean executeNextImmediately = true; + boolean hasFailure = false; + synchronized (this) { + if (hasFailure()) { + hasFailure = true; + } else { + if (hasAggs) { + try { + addEstimateAndMaybeBreak(aggsSize); + } catch (Exception exc) { + releaseBuffer(); + onMergeFailure(exc); + hasFailure = true; + } + } + if (hasFailure == false) { + aggsCurrentBufferSize += aggsSize; + // add one if a partial merge is pending + int size = buffer.size() + (hasPartialReduce ? 1 : 0); + if (size >= batchReduceSize) { + hasPartialReduce = true; + executeNextImmediately = false; + QuerySearchResult[] clone = buffer.toArray(QuerySearchResult[]::new); + MergeTask task = new MergeTask(clone, aggsCurrentBufferSize, new ArrayList<>(emptyResults), next); + aggsCurrentBufferSize = 0; + buffer.clear(); + emptyResults.clear(); + queue.add(task); + tryExecuteNext(); + } + buffer.add(result); } - aggsCurrentBufferSize += aggsSize; - } - // add one if a partial merge is pending - int size = buffer.size() + (hasPartialReduce ? 
1 : 0); - if (size >= batchReduceSize) { - hasPartialReduce = true; - executeNextImmediately = false; - QuerySearchResult[] clone = buffer.toArray(QuerySearchResult[]::new); - MergeTask task = new MergeTask(clone, aggsCurrentBufferSize, new ArrayList<>(emptyResults), next); - aggsCurrentBufferSize = 0; - buffer.clear(); - emptyResults.clear(); - queue.add(task); - tryExecuteNext(); } - buffer.add(result); } - } - if (executeNextImmediately) { - next.run(); + if (hasFailure) { + result.consumeAll(); + } + if (executeNextImmediately) { + next.run(); + } } } private void releaseBuffer() { - buffer.forEach(QuerySearchResult::releaseAggs); + for (QuerySearchResult querySearchResult : buffer) { + querySearchResult.releaseAggs(); + } buffer.clear(); } private synchronized void onMergeFailure(Exception exc) { - if (hasFailure()) { + if (failure.compareAndSet(null, exc) == false) { assert circuitBreakerBytes == 0; return; } @@ -401,79 +435,89 @@ private synchronized void onMergeFailure(Exception exc) { circuitBreaker.addWithoutBreaking(-circuitBreakerBytes); circuitBreakerBytes = 0; } - failure.compareAndSet(null, exc); - final List toCancels = new ArrayList<>(); - toCancels.add(() -> onPartialMergeFailure.accept(exc)); + onPartialMergeFailure.accept(exc); final MergeTask task = runningTask.getAndSet(null); if (task != null) { - toCancels.add(task::cancel); + task.cancel(); } MergeTask mergeTask; while ((mergeTask = queue.pollFirst()) != null) { - toCancels.add(mergeTask::cancel); + mergeTask.cancel(); } mergeResult = null; - Releasables.close(toCancels); - } - - private void onAfterMerge(MergeTask task, MergeResult newResult, long estimatedSize) { - synchronized (this) { - if (hasFailure()) { - return; - } - runningTask.compareAndSet(task, null); - mergeResult = newResult; - if (hasAggs) { - // Update the circuit breaker to remove the size of the source aggregations - // and replace the estimation with the serialized size of the newly reduced result. - long newSize = mergeResult.estimatedSize - estimatedSize; - addWithoutBreaking(newSize); - logger.trace( - "aggs partial reduction [{}->{}] max [{}]", - estimatedSize, - mergeResult.estimatedSize, - maxAggsCurrentBufferSize - ); - } - task.consumeListener(); - } } private void tryExecuteNext() { final MergeTask task; synchronized (this) { - if (queue.isEmpty() || hasFailure() || runningTask.get() != null) { + if (hasFailure() || runningTask.get() != null) { return; } task = queue.poll(); - runningTask.compareAndSet(null, task); + runningTask.set(task); + } + if (task == null) { + return; } executor.execute(new AbstractRunnable() { @Override protected void doRun() { - final MergeResult thisMergeResult = mergeResult; - long estimatedTotalSize = (thisMergeResult != null ? 
thisMergeResult.estimatedSize : 0) + task.aggsBufferSize; - final MergeResult newMerge; - final QuerySearchResult[] toConsume = task.consumeBuffer(); - if (toConsume == null) { - return; - } - try { - long estimatedMergeSize = estimateRamBytesUsedForReduce(estimatedTotalSize); - addEstimateAndMaybeBreak(estimatedMergeSize); - estimatedTotalSize += estimatedMergeSize; - ++numReducePhases; - newMerge = partialReduce(toConsume, task.emptyResults, topDocsStats, thisMergeResult, numReducePhases); - } catch (Exception t) { - for (QuerySearchResult result : toConsume) { - result.releaseAggs(); + MergeTask mergeTask = task; + QuerySearchResult[] toConsume = mergeTask.consumeBuffer(); + while (mergeTask != null) { + final MergeResult thisMergeResult = mergeResult; + long estimatedTotalSize = (thisMergeResult != null ? thisMergeResult.estimatedSize : 0) + mergeTask.aggsBufferSize; + final MergeResult newMerge; + try { + long estimatedMergeSize = estimateRamBytesUsedForReduce(estimatedTotalSize); + addEstimateAndMaybeBreak(estimatedMergeSize); + estimatedTotalSize += estimatedMergeSize; + ++numReducePhases; + newMerge = partialReduce(toConsume, mergeTask.emptyResults, topDocsStats, thisMergeResult, numReducePhases); + } catch (Exception t) { + QueryPhaseResultConsumer.releaseAggs(toConsume); + onMergeFailure(t); + return; + } + synchronized (QueryPhaseResultConsumer.this) { + if (hasFailure()) { + return; + } + mergeResult = newMerge; + if (hasAggs) { + // Update the circuit breaker to remove the size of the source aggregations + // and replace the estimation with the serialized size of the newly reduced result. + long newSize = mergeResult.estimatedSize - estimatedTotalSize; + addWithoutBreaking(newSize); + if (logger.isTraceEnabled()) { + logger.trace( + "aggs partial reduction [{}->{}] max [{}]", + estimatedTotalSize, + mergeResult.estimatedSize, + maxAggsCurrentBufferSize + ); + } + } + } + Runnable r = mergeTask.consumeListener(); + synchronized (QueryPhaseResultConsumer.this) { + while (true) { + mergeTask = queue.poll(); + runningTask.set(mergeTask); + if (mergeTask == null) { + break; + } + toConsume = mergeTask.consumeBuffer(); + if (toConsume != null) { + break; + } + } + } + if (r != null) { + r.run(); } - onMergeFailure(t); - return; } - onAfterMerge(task, newMerge, estimatedTotalSize); - tryExecuteNext(); } @Override @@ -483,43 +527,6 @@ public void onFailure(Exception exc) { }); } - public synchronized TopDocsStats consumeTopDocsStats() { - for (QuerySearchResult result : buffer) { - topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly()); - } - return topDocsStats; - } - - public synchronized List consumeTopDocs() { - if (hasTopDocs == false) { - return Collections.emptyList(); - } - List topDocsList = new ArrayList<>(); - if (mergeResult != null) { - topDocsList.add(mergeResult.reducedTopDocs); - } - for (QuerySearchResult result : buffer) { - TopDocsAndMaxScore topDocs = result.consumeTopDocs(); - setShardIndex(topDocs.topDocs, result.getShardIndex()); - topDocsList.add(topDocs.topDocs); - } - return topDocsList; - } - - public synchronized List> getAggs() { - if (hasAggs == false) { - return Collections.emptyList(); - } - List> aggsList = new ArrayList<>(); - if (mergeResult != null) { - aggsList.add(DelayableWriteable.referencing(mergeResult.reducedAggs)); - } - for (QuerySearchResult result : buffer) { - aggsList.add(result.getAggs()); - } - return aggsList; - } - public synchronized void releaseAggs() { if (hasAggs) { for (QuerySearchResult result : 
buffer) { @@ -529,6 +536,12 @@ public synchronized void releaseAggs() { } } + private static void releaseAggs(QuerySearchResult... toConsume) { + for (QuerySearchResult result : toConsume) { + result.releaseAggs(); + } + } + private record MergeResult( List processedShards, TopDocs reducedTopDocs, @@ -555,21 +568,21 @@ public synchronized QuerySearchResult[] consumeBuffer() { return toRet; } - public void consumeListener() { - if (next != null) { - next.run(); - next = null; - } + public synchronized Runnable consumeListener() { + Runnable n = next; + next = null; + return n; } - public synchronized void cancel() { + public void cancel() { QuerySearchResult[] buffer = consumeBuffer(); if (buffer != null) { - for (QuerySearchResult result : buffer) { - result.releaseAggs(); - } + releaseAggs(buffer); + } + Runnable next = consumeListener(); + if (next != null) { + next.run(); } - consumeListener(); } } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index ca9c4ab44c423..b118c2560925e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.DocValueFormat; @@ -190,7 +191,7 @@ public static List mergeKnnResults(SearchRequest request, List topDocs, + final List topDocs, int from, int size, List reducedCompletionSuggestions @@ -233,22 +234,22 @@ static SortedTopDocs sortDocs( return new SortedTopDocs(scoreDocs, isSortedByField, sortFields, groupField, groupValues, numSuggestDocs); } - static TopDocs mergeTopDocs(Collection results, int topN, int from) { + static TopDocs mergeTopDocs(List results, int topN, int from) { if (results.isEmpty()) { return null; } - final TopDocs topDocs = results.stream().findFirst().get(); + final TopDocs topDocs = results.getFirst(); final TopDocs mergedTopDocs; final int numShards = results.size(); if (numShards == 1 && from == 0) { // only one shard and no pagination we can just return the topDocs as we got them. 
            return topDocs;
        } else if (topDocs instanceof TopFieldGroups firstTopDocs) {
            final Sort sort = new Sort(firstTopDocs.fields);
-            final TopFieldGroups[] shardTopDocs = results.toArray(new TopFieldGroups[numShards]);
+            final TopFieldGroups[] shardTopDocs = results.toArray(new TopFieldGroups[0]);
            mergedTopDocs = TopFieldGroups.merge(sort, from, topN, shardTopDocs, false);
        } else if (topDocs instanceof TopFieldDocs firstTopDocs) {
            final Sort sort = checkSameSortTypes(results, firstTopDocs.fields);
-            final TopFieldDocs[] shardTopDocs = results.toArray(new TopFieldDocs[numShards]);
+            final TopFieldDocs[] shardTopDocs = results.toArray(new TopFieldDocs[0]);
            mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs);
        } else {
            final TopDocs[] shardTopDocs = results.toArray(new TopDocs[numShards]);
@@ -524,17 +525,7 @@ public AggregationReduceContext forFinalReduction() {
                topDocs.add(td.topDocs);
            }
        }
-        return reducedQueryPhase(
-            queryResults,
-            Collections.emptyList(),
-            topDocs,
-            topDocsStats,
-            0,
-            true,
-            aggReduceContextBuilder,
-            null,
-            true
-        );
+        return reducedQueryPhase(queryResults, null, topDocs, topDocsStats, 0, true, aggReduceContextBuilder, null, true);
    }

    /**
@@ -548,7 +539,7 @@ public AggregationReduceContext forFinalReduction() {
     */
    static ReducedQueryPhase reducedQueryPhase(
        Collection<? extends SearchPhaseResult> queryResults,
-        List<DelayableWriteable<InternalAggregations>> bufferedAggs,
+        @Nullable List<DelayableWriteable<InternalAggregations>> bufferedAggs,
        List<TopDocs> bufferedTopDocs,
        TopDocsStats topDocsStats,
        int numReducePhases,
@@ -642,7 +633,12 @@ static ReducedQueryPhase reducedQueryPhase(
            reducedSuggest = new Suggest(Suggest.reduce(groupedSuggestions));
            reducedCompletionSuggestions = reducedSuggest.filter(CompletionSuggestion.class);
        }
-        final InternalAggregations aggregations = reduceAggs(aggReduceContextBuilder, performFinalReduce, bufferedAggs);
+        final InternalAggregations aggregations = bufferedAggs == null
+            ? null
+            : InternalAggregations.topLevelReduceDelayable(
+                bufferedAggs,
+                performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction()
+            );
        final SearchProfileResultsBuilder profileBuilder = profileShardResults.isEmpty()
            ? null
            : new SearchProfileResultsBuilder(profileShardResults);
@@ -681,19 +677,6 @@ static ReducedQueryPhase reducedQueryPhase(
        );
    }

-    private static InternalAggregations reduceAggs(
-        AggregationReduceContext.Builder aggReduceContextBuilder,
-        boolean performFinalReduce,
-        List<DelayableWriteable<InternalAggregations>> toReduce
-    ) {
-        return toReduce.isEmpty()
-            ? null
-            : InternalAggregations.topLevelReduceDelayable(
-                toReduce,
-                performFinalReduce ? aggReduceContextBuilder.forFinalReduction() : aggReduceContextBuilder.forPartialReduction()
-            );
-    }
-
    /**
     * Checks that query results from all shards have consistent unsigned_long format.
     * Sort queries on a field that has long type in one index, and unsigned_long in another index

From a02f68217a5bfb226fbcd3b26cfc2b125806be94 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Fri, 25 Oct 2024 08:53:27 +0200
Subject: [PATCH 45/60] Lazy initialize HttpRouteStatsTracker in MethodHandlers
 (#114107)

Route stats tracker instances account for about 1M of heap per ES instance.
Making the tracker lazily initialized should come at trivial overhead, and it
in fact makes computing the node stats cheaper by avoiding spurious sums over
0-valued long adders.
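For readers unfamiliar with the idiom, the change below is a race-tolerant lazy
initializer built on a `VarHandle`. Here is a minimal, self-contained sketch of
that pattern; the `LazyStatsHolder` and `Stats` names are illustrative stand-ins
for `MethodHandlers` and `HttpRouteStatsTracker`, not code from this patch:

```java
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.util.concurrent.atomic.LongAdder;

final class LazyStatsHolder {
    // Illustrative stand-in for HttpRouteStatsTracker.
    static final class Stats {
        final LongAdder requests = new LongAdder();
    }

    @SuppressWarnings("unused") // written only through STATS_HANDLE
    private volatile Stats stats;

    private static final VarHandle STATS_HANDLE;

    static {
        try {
            STATS_HANDLE = MethodHandles.lookup().findVarHandle(LazyStatsHolder.class, "stats", Stats.class);
        } catch (NoSuchFieldException | IllegalAccessException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    Stats stats() {
        // Fast path: acquire-read the published instance, allocating nothing.
        var existing = (Stats) STATS_HANDLE.getAcquire(this);
        if (existing == null) {
            var created = new Stats();
            // compareAndExchange returns the witness value: null means our instance
            // won the race; otherwise another thread installed one first and we use theirs.
            existing = (Stats) STATS_HANDLE.compareAndExchange(this, null, created);
            if (existing == null) {
                existing = created;
            }
        }
        return existing;
    }
}
```

A losing allocation is simply discarded, and callers that only ever read stats
pay a single null check, which is what lets the patch answer `getStats()` with
the shared `HttpRouteStats.EMPTY` constant instead of eagerly allocating a
tracker per route.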
--- .../elasticsearch/http/HttpRouteStats.java | 2 + .../elasticsearch/rest/MethodHandlers.java | 42 ++++++++++++++----- .../elasticsearch/rest/RestController.java | 25 +++++------ 3 files changed, 46 insertions(+), 23 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java b/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java index 5be1ae9312c46..a15b929fd3c1b 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java +++ b/server/src/main/java/org/elasticsearch/http/HttpRouteStats.java @@ -49,6 +49,8 @@ public record HttpRouteStats( long[] responseTimeHistogram ) implements Writeable, ToXContentObject { + public static final HttpRouteStats EMPTY = new HttpRouteStats(0, 0, new long[0], 0, 0, new long[0], new long[0]); + public HttpRouteStats(StreamInput in) throws IOException { this(in.readVLong(), in.readVLong(), in.readVLongArray(), in.readVLong(), in.readVLong(), in.readVLongArray(), in.readVLongArray()); } diff --git a/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java b/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java index a947ddce2b9f3..2f53f48f9ae5b 100644 --- a/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java +++ b/server/src/main/java/org/elasticsearch/rest/MethodHandlers.java @@ -13,6 +13,8 @@ import org.elasticsearch.http.HttpRouteStats; import org.elasticsearch.http.HttpRouteStatsTracker; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; import java.util.EnumMap; import java.util.Map; import java.util.Set; @@ -25,7 +27,18 @@ final class MethodHandlers { private final String path; private final Map> methodHandlers; - private final HttpRouteStatsTracker statsTracker = new HttpRouteStatsTracker(); + @SuppressWarnings("unused") // only accessed via #STATS_TRACKER_HANDLE, lazy initialized because instances consume non-trivial heap + private volatile HttpRouteStatsTracker statsTracker; + + private static final VarHandle STATS_TRACKER_HANDLE; + + static { + try { + STATS_TRACKER_HANDLE = MethodHandles.lookup().findVarHandle(MethodHandlers.class, "statsTracker", HttpRouteStatsTracker.class); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new ExceptionInInitializerError(e); + } + } MethodHandlers(String path) { this.path = path; @@ -73,19 +86,26 @@ Set getValidMethods() { return methodHandlers.keySet(); } - public void addRequestStats(int contentLength) { - statsTracker.addRequestStats(contentLength); - } - - public void addResponseStats(long contentLength) { - statsTracker.addResponseStats(contentLength); + public HttpRouteStats getStats() { + var tracker = existingStatsTracker(); + if (tracker == null) { + return HttpRouteStats.EMPTY; + } + return tracker.getStats(); } - public void addResponseTime(long timeMillis) { - statsTracker.addResponseTime(timeMillis); + public HttpRouteStatsTracker statsTracker() { + var tracker = existingStatsTracker(); + if (tracker == null) { + var newTracker = new HttpRouteStatsTracker(); + if ((tracker = (HttpRouteStatsTracker) STATS_TRACKER_HANDLE.compareAndExchange(this, null, newTracker)) == null) { + tracker = newTracker; + } + } + return tracker; } - public HttpRouteStats getStats() { - return statsTracker.getStats(); + private HttpRouteStatsTracker existingStatsTracker() { + return (HttpRouteStatsTracker) STATS_TRACKER_HANDLE.getAcquire(this); } } diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 
c2064fdd931de..7446ec5bb6717 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -36,6 +36,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.http.HttpHeadersValidationException; import org.elasticsearch.http.HttpRouteStats; +import org.elasticsearch.http.HttpRouteStatsTracker; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; @@ -879,7 +880,7 @@ public void sendResponse(RestResponse response) { private static final class ResourceHandlingHttpChannel extends DelegatingRestChannel { private final CircuitBreakerService circuitBreakerService; private final int contentLength; - private final MethodHandlers methodHandlers; + private final HttpRouteStatsTracker statsTracker; private final long startTime; private final AtomicBoolean closed = new AtomicBoolean(); @@ -892,7 +893,7 @@ private static final class ResourceHandlingHttpChannel extends DelegatingRestCha super(delegate); this.circuitBreakerService = circuitBreakerService; this.contentLength = contentLength; - this.methodHandlers = methodHandlers; + this.statsTracker = methodHandlers.statsTracker(); this.startTime = rawRelativeTimeInMillis(); } @@ -901,12 +902,12 @@ public void sendResponse(RestResponse response) { boolean success = false; try { close(); - methodHandlers.addRequestStats(contentLength); - methodHandlers.addResponseTime(rawRelativeTimeInMillis() - startTime); + statsTracker.addRequestStats(contentLength); + statsTracker.addResponseTime(rawRelativeTimeInMillis() - startTime); if (response.isChunked() == false) { - methodHandlers.addResponseStats(response.content().length()); + statsTracker.addResponseStats(response.content().length()); } else { - final var responseLengthRecorder = new ResponseLengthRecorder(methodHandlers); + final var responseLengthRecorder = new ResponseLengthRecorder(statsTracker); final var headers = response.getHeaders(); response = RestResponse.chunked( response.status(), @@ -941,11 +942,11 @@ private void close() { } } - private static class ResponseLengthRecorder extends AtomicReference implements Releasable { + private static class ResponseLengthRecorder extends AtomicReference implements Releasable { private long responseLength; - private ResponseLengthRecorder(MethodHandlers methodHandlers) { - super(methodHandlers); + private ResponseLengthRecorder(HttpRouteStatsTracker routeStatsTracker) { + super(routeStatsTracker); } @Override @@ -953,11 +954,11 @@ public void close() { // closed just before sending the last chunk, and also when the whole RestResponse is closed since the client might abort the // connection before we send the last chunk, in which case we won't have recorded the response in the // stats yet; thus we need run-once semantics here: - final var methodHandlers = getAndSet(null); - if (methodHandlers != null) { + final var routeStatsTracker = getAndSet(null); + if (routeStatsTracker != null) { // if we started sending chunks then we're closed on the transport worker, no need for sync assert responseLength == 0L || Transports.assertTransportThread(); - methodHandlers.addResponseStats(responseLength); + routeStatsTracker.addResponseStats(responseLength); } } From ca4009e29823ae3eaaad26b75d8bb47ade5e218c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 25 Oct 2024 09:13:18 +0200 Subject: [PATCH 46/60] [DOCS] Adds 
 stream inference API docs (#115333)

Co-authored-by: Pat Whelan
---
 .../inference/inference-apis.asciidoc         |   2 +
 .../inference/stream-inference.asciidoc       | 122 ++++++++++++++++++
 2 files changed, 124 insertions(+)
 create mode 100644 docs/reference/inference/stream-inference.asciidoc

diff --git a/docs/reference/inference/inference-apis.asciidoc b/docs/reference/inference/inference-apis.asciidoc
index ddcff1abc7dce..1206cb02ba89a 100644
--- a/docs/reference/inference/inference-apis.asciidoc
+++ b/docs/reference/inference/inference-apis.asciidoc
@@ -19,6 +19,7 @@ the following APIs to manage {infer} models and perform {infer}:
 * <>
 * <>
 * <>
+* <>
 * <>

 [[inference-landscape]]
@@ -56,6 +57,7 @@ include::delete-inference.asciidoc[]
 include::get-inference.asciidoc[]
 include::post-inference.asciidoc[]
 include::put-inference.asciidoc[]
+include::stream-inference.asciidoc[]
 include::update-inference.asciidoc[]
 include::service-alibabacloud-ai-search.asciidoc[]
 include::service-amazon-bedrock.asciidoc[]
diff --git a/docs/reference/inference/stream-inference.asciidoc b/docs/reference/inference/stream-inference.asciidoc
new file mode 100644
index 0000000000000..e66acd630cb3e
--- /dev/null
+++ b/docs/reference/inference/stream-inference.asciidoc
@@ -0,0 +1,122 @@
+[role="xpack"]
+[[stream-inference-api]]
+=== Stream inference API
+
+Streams a chat completion response.
+
+IMPORTANT: The {infer} APIs enable you to use certain services, such as built-in {ml} models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
+For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models.
+However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>.
+
+
+[discrete]
+[[stream-inference-api-request]]
+==== {api-request-title}
+
+`POST /_inference/<inference_id>/_stream`
+
+`POST /_inference/<task_type>/<inference_id>/_stream`
+
+
+[discrete]
+[[stream-inference-api-prereqs]]
+==== {api-prereq-title}
+
+* Requires the `monitor_inference` <>
+(the built-in `inference_admin` and `inference_user` roles grant this privilege)
+* You must use a client that supports streaming.
+
+
+[discrete]
+[[stream-inference-api-desc]]
+==== {api-description-title}
+
+The stream {infer} API enables real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.
+It only works with the `completion` task type.
+
+
+[discrete]
+[[stream-inference-api-path-params]]
+==== {api-path-parms-title}
+
+`<inference_id>`::
+(Required, string)
+The unique identifier of the {infer} endpoint.
+
+
+`<task_type>`::
+(Optional, string)
+The type of {infer} task that the model performs.
+
+
+[discrete]
+[[stream-inference-api-request-body]]
+==== {api-request-body-title}
+
+`input`::
+(Required, string or array of strings)
+The text on which you want to perform the {infer} task.
+`input` can be a single string or an array.
++
+--
+[NOTE]
+====
+Inference endpoints for the `completion` task type currently only support a
+single string as input.
+====
+--
+
+
+[discrete]
+[[stream-inference-api-example]]
+==== {api-examples-title}
+
+The following example performs a completion on the example question with streaming.
+
+
+[source,console]
+------------------------------------------------------------
+POST _inference/completion/openai-completion/_stream
+{
+  "input": "What is Elastic?"
+} +------------------------------------------------------------ +// TEST[skip:TBD] + + +The API returns the following response: + + +[source,txt] +------------------------------------------------------------ +event: message +data: { + "completion":[{ + "delta":"Elastic" + }] +} + +event: message +data: { + "completion":[{ + "delta":" is" + }, + { + "delta":" a" + } + ] +} + +event: message +data: { + "completion":[{ + "delta":" software" + }, + { + "delta":" company" + }] +} + +(...) +------------------------------------------------------------ +// NOTCONSOLE From 6688fe225584cfa8d12ebb5e56662918a593f690 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:26:12 +0300 Subject: [PATCH 47/60] Remove excluded tests from rest compat (#115617) --- x-pack/plugin/downsample/qa/rest/build.gradle | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/x-pack/plugin/downsample/qa/rest/build.gradle b/x-pack/plugin/downsample/qa/rest/build.gradle index 5142632a36006..ba5ac7b0c7317 100644 --- a/x-pack/plugin/downsample/qa/rest/build.gradle +++ b/x-pack/plugin/downsample/qa/rest/build.gradle @@ -32,20 +32,6 @@ tasks.named('yamlRestTest') { tasks.named('yamlRestCompatTest') { usesDefaultDistribution() } -tasks.named("yamlRestCompatTestTransform").configure ({ task -> - task.skipTest("downsample/10_basic/Downsample index with empty dimension on routing path", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample histogram as label", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample date timestamp field using strict_date_optional_time_nanos format", - "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample a downsampled index", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample date_nanos timestamp field using custom format", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample using coarse grained timestamp", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample label with ignore_above", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample object field", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample empty and missing labels", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample index", "Skip until pr/115358 gets backported") - task.skipTest("downsample/10_basic/Downsample index with empty dimension", "Skip until pr/115358 gets backported") -}) if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("yamlRestTest").configure{enabled = false } From e7897bdeff7f4ec76e8a0801c86f5dea11cacabd Mon Sep 17 00:00:00 2001 From: Salvatore Campagna <93581129+salvatore-campagna@users.noreply.github.com> Date: Fri, 25 Oct 2024 09:57:12 +0200 Subject: [PATCH 48/60] Return `_ignored_source` as a top level array field (#115328) This PR introduces a fix for the `fields` and `stored_fields` APIs and the way `_ignored_source` field is handled: 1. **Return `_ignored_source` as a top-level array metadata field**: - The `_ignored_source` field is now returned as a top-level array in the metadata as done with other metadata fields. 2. 
**Return `_ignored_source` as an array of values**: - Even when there is only a single ignored field, `_ignored_source` will now be returned as an array of values. This is done to be consistent with how the `_ignored` field is returned. Without this fix, we would return the `_ignored_source` field twice, as a top-level field and as part of the `fields` array. Also, without this fix, we would only return a single value instead of all ignored field values. --- .../mapper/IgnoredSourceFieldMapper.java | 3 + .../index/mapper/MapperFeatures.java | 1 + .../org/elasticsearch/search/SearchHit.java | 3 +- .../fetch/subphase/FetchFieldsPhase.java | 25 ++- .../index/get/DocumentFieldTests.java | 5 +- .../search/SearchResponseUtils.java | 3 +- .../rest-api-spec/test/20_ignored_source.yml | 158 +++++++++++++++++- 7 files changed, 182 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index 70d73fc2ffb9a..7e2bebfd403cb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -58,6 +58,9 @@ public class IgnoredSourceFieldMapper extends MetadataFieldMapper { static final NodeFeature TRACK_IGNORED_SOURCE = new NodeFeature("mapper.track_ignored_source"); static final NodeFeature DONT_EXPAND_DOTS_IN_IGNORED_SOURCE = new NodeFeature("mapper.ignored_source.dont_expand_dots"); + static final NodeFeature IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD = new NodeFeature( + "mapper.ignored_source_as_top_level_metadata_array_field" + ); static final NodeFeature ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS = new NodeFeature( "mapper.ignored_source.always_store_object_arrays_in_nested" ); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 026c7c98d7aeb..a5f173afffba2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -63,6 +63,7 @@ public Set getTestFeatures() { RangeFieldMapper.DATE_RANGE_INDEXING_FIX, IgnoredSourceFieldMapper.DONT_EXPAND_DOTS_IN_IGNORED_SOURCE, SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION, + IgnoredSourceFieldMapper.IGNORED_SOURCE_AS_TOP_LEVEL_METADATA_ARRAY_FIELD, IgnoredSourceFieldMapper.ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS ); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 1611c95d99df4..98f7c92d9997a 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.SimpleRefCounted; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -847,7 +848,7 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t } // _ignored is the only multi-valued meta field // TODO: can we avoid having an exception here? 
- if (field.getName().equals(IgnoredFieldMapper.NAME)) { + if (IgnoredFieldMapper.NAME.equals(field.getName()) || IgnoredSourceFieldMapper.NAME.equals(field.getName())) { builder.field(field.getName(), field.getValues()); } else { builder.field(field.getName(), field.getValue()); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java index 03bfbd40d97be..e0cb5a668b4ab 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhase.java @@ -57,13 +57,28 @@ public FetchSubPhaseProcessor getProcessor(FetchContext fetchContext) { return null; } + // NOTE: FieldFetcher for non-metadata fields, as well as `_id` and `_source`. + // We need to retain `_id` and `_source` here to correctly populate the `StoredFieldSpecs` created by the + // `FieldFetcher` constructor. final SearchExecutionContext searchExecutionContext = fetchContext.getSearchExecutionContext(); - final FieldFetcher fieldFetcher = fetchFieldsContext == null ? null - : fetchFieldsContext.fields() == null ? null - : fetchFieldsContext.fields().isEmpty() ? null - : FieldFetcher.create(searchExecutionContext, fetchFieldsContext.fields()); + final FieldFetcher fieldFetcher = (fetchFieldsContext == null + || fetchFieldsContext.fields() == null + || fetchFieldsContext.fields().isEmpty()) + ? null + : FieldFetcher.create( + searchExecutionContext, + fetchFieldsContext.fields() + .stream() + .filter( + fieldAndFormat -> (searchExecutionContext.isMetadataField(fieldAndFormat.field) == false + || searchExecutionContext.getFieldType(fieldAndFormat.field).isStored() == false + || IdFieldMapper.NAME.equals(fieldAndFormat.field) + || SourceFieldMapper.NAME.equals(fieldAndFormat.field)) + ) + .toList() + ); - // NOTE: Collect stored metadata fields requested via `fields` (in FetchFieldsContext`) like for instance the _ignored source field + // NOTE: Collect stored metadata fields requested via `fields` (in FetchFieldsContext) like for instance the _ignored source field final Set fetchContextMetadataFields = new HashSet<>(); if (fetchFieldsContext != null && fetchFieldsContext.fields() != null && fetchFieldsContext.fields().isEmpty() == false) { for (final FieldAndFormat fieldAndFormat : fetchFieldsContext.fields()) { diff --git a/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java b/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java index 76426e9df83d8..8a27c3545a110 100644 --- a/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java +++ b/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; @@ -122,7 +123,7 @@ public static Tuple randomDocumentField( if (isMetafield) { String metaField = randomValueOtherThanMany(excludeMetaFieldFilter, () -> randomFrom(IndicesModule.getBuiltInMetadataFields())); DocumentField documentField; - if (metaField.equals(IgnoredFieldMapper.NAME)) { + if (IgnoredFieldMapper.NAME.equals(metaField) || 
IgnoredSourceFieldMapper.NAME.equals(metaField)) { int numValues = randomIntBetween(1, 3); List ignoredFields = new ArrayList<>(numValues); for (int i = 0; i < numValues; i++) { @@ -130,7 +131,7 @@ public static Tuple randomDocumentField( } documentField = new DocumentField(metaField, ignoredFields); } else { - // meta fields are single value only, besides _ignored + // meta fields are single value only, besides _ignored and _ignored_source documentField = new DocumentField(metaField, Collections.singletonList(randomAlphaOfLengthBetween(3, 10))); } return Tuple.tuple(documentField, documentField); diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index df1ea6b756405..b0edbb829df2a 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; @@ -83,7 +84,7 @@ public enum SearchResponseUtils { SearchHit.METADATA_FIELDS, v -> new HashMap() ); - if (fieldName.equals(IgnoredFieldMapper.NAME)) { + if (IgnoredFieldMapper.NAME.equals(fieldName) || IgnoredSourceFieldMapper.NAME.equals(fieldName)) { fieldMap.put(fieldName, new DocumentField(fieldName, (List) fieldValue)); } else { fieldMap.put(fieldName, new DocumentField(fieldName, Collections.singletonList(fieldValue))); diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml index c54edb0994860..2f111d579ebb1 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_ignored_source.yml @@ -27,6 +27,10 @@ setup: --- "fetch stored fields wildcard": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -40,6 +44,10 @@ setup: --- "fetch fields wildcard": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -53,6 +61,10 @@ setup: --- "fetch stored fields by name": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -62,10 +74,14 @@ setup: stored_fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "fetch fields by name": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires 
returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -75,10 +91,14 @@ setup: fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "fields and stored fields combination": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -92,10 +112,14 @@ setup: - match: { hits.total.value: 1 } - match: { hits.hits.0.fields.object: null } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "wildcard fields and stored fields combination": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: search: index: test @@ -108,6 +132,10 @@ setup: --- "fields with ignored source in stored fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -118,11 +146,15 @@ setup: fields: [ object ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } - match: { hits.hits.0.fields: null } --- "fields with ignored source in fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -133,10 +165,14 @@ setup: fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "ignored source via fields and wildcard stored fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -147,10 +183,14 @@ setup: fields: [ _ignored_source ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } --- "wildcard fields and ignored source via stored fields": + - requires: + cluster_features: [ mapper.ignored_source_as_top_level_metadata_array_field ] + reason: requires returning the _ignored_source field as a top level array metadata field + - do: headers: Content-Type: application/yaml @@ -161,4 +201,108 @@ setup: fields: [ "*" ] - match: { hits.total.value: 1 } - - match: { hits.hits.0._ignored_source: !!binary 
"BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + - match: { hits.hits.0._ignored_source.0: !!binary "BgAAAG9iamVjdHktLS0KbmFtZTogImZvbyIKdmFsdWU6IDEwCg==" } + +--- +ignored source array via fields: + - requires: + cluster_features: [mapper.ignored_source_as_top_level_metadata_array_field] + reason: requires returning the _ignored_source field as a top level array metadata field + + - do: + indices.create: + index: test-dynamic-fields + body: + settings: + index: + mapping: + source: + mode: synthetic + total_fields: + ignore_dynamic_beyond_limit: true + limit: 1 # only `name` static mapping is allowed + mappings: + properties: + name: + type: keyword + + - do: + bulk: + index: test-dynamic-fields + refresh: true + body: + - '{ "index": { } }' + - '{ "name": "foo", "value": 1, "id": "f5t7-66gt" }' + - match: { errors: false } + + - do: + headers: + Content-Type: application/yaml + search: + index: test-dynamic-fields + body: + fields: [ "_ignored_source" ] + query: + match_all: {} + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.name: "foo" } + - match: { hits.hits.0._source.value: 1 } + - match: { hits.hits.0._source.id: "f5t7-66gt" } + - match: { hits.hits.0._ignored: [ "id", "value" ]} + - length: { hits.hits.0._ignored_source: 2 } + - match: { hits.hits.0._ignored_source.0: !!binary "AgAAAGlkU2Y1dDctNjZndA==" } # `id` field + - match: { hits.hits.0._ignored_source.1: !!binary "BQAAAHZhbHVlSQEAAAA=" } # `value` field + +--- +ignored source array via stored_fields: + - requires: + cluster_features: [mapper.ignored_source_as_top_level_metadata_array_field] + reason: requires returning the _ignored_source field as a top level array metadata field + + - do: + indices.create: + index: test-dynamic-stored-fields + body: + settings: + index: + mapping: + source: + mode: synthetic + total_fields: + ignore_dynamic_beyond_limit: true + limit: 1 # only `name` static mapping is allowed + mappings: + properties: + name: + type: keyword + + - do: + bulk: + index: test-dynamic-stored-fields + refresh: true + body: + - '{ "index": { } }' + - '{ "name": "foo", "value": 1, "id": "f5t7-66gt" }' + - match: { errors: false } + + - do: + headers: + Content-Type: application/yaml + search: + index: test-dynamic-stored-fields + body: + # NOTE: when using synthetic source `_source` field needs to be explicitly requested via `stored_fields`, + # a wildcard request would not include it. + stored_fields: [ "_ignored_source", "_source" ] + query: + match_all: {} + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.name: "foo" } + - match: { hits.hits.0._source.value: 1 } + - match: { hits.hits.0._source.id: "f5t7-66gt" } + - match: { hits.hits.0._ignored: [ "id", "value" ]} + - length: { hits.hits.0._ignored_source: 2 } + - match: { hits.hits.0._ignored_source.0: !!binary "AgAAAGlkU2Y1dDctNjZndA==" } # `id` field + - match: { hits.hits.0._ignored_source.1: !!binary "BQAAAHZhbHVlSQEAAAA=" } # `value` field From 3d307e0d7867116585dccfb335e0cab0c192bdb9 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Fri, 25 Oct 2024 10:09:53 +0200 Subject: [PATCH 49/60] Don't return TEXT type for functions that take TEXT (#114334) Always return `KEYWORD` for functions that previously returned `TEXT`, because any change to the value, no matter how small, is enough to render meaningless the original analyzer associated with the `TEXT` field value. In principle, if the attribute is no longer the original `FieldAttribute`, it can no longer claim to have the type `TEXT`. 
This has been done for all functions: conversion functions, aggregating
functions, and multi-value functions. Several already produced `KEYWORD` for
`TEXT` input (e.g. ToString, FromBase64 and ToBase64, MvZip, ToLower, ToUpper,
DateFormat, Concat, Left, Repeat, Replace, Right, Split, Substring), but many
others incorrectly claimed to produce `TEXT`. This PR makes the rule strict, and
changes the functions' unit tests so that no test may expect a function's output
to be `TEXT`.

One side effect of this change is that functions taking multiple parameters that
must all have the same type now treat TEXT and KEYWORD as the same. This was
already the case for functions like `Concat`, and is now also the case for
`Greatest`, `Least`, `Case`, `Coalesce` and `MvAppend`.

An associated change is that the type casting operator `::text` has been removed
entirely. It used to map onto the `ToString` function, which returns type
KEYWORD, so `::text` already produced a `KEYWORD`; that mismatch was a bug, and
it is now fixed. Should we ever wish to actually produce real `TEXT`, this
operator is now freed up for future use (although such a function would likely
require parameters to specify the analyzer, so it might never be an operator
again).

### Backwards compatibility issues:

This change fails BWC tests, since many tests assert on TEXT output from
functions. For this reason we needed to block two scenarios:

* We used the capability `functions_never_emit_text` to prevent 7 csv-spec tests
  and 2 yaml tests from running against older versions that still emit text.
* We used `skipTest` to also block those two yaml tests from running against the
  latest build when using older yaml files downloaded from as far back as 8.14.

In all cases, the change observed in these tests was simply that the result
columns no longer have type `text`, and are instead `keyword`.
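To make the new rule concrete, here is a tiny sketch (not code from this PR) of
how a string function can normalize its declared return type with the
`DataType.noText()` helper this PR adds; the `returnTypeFor` method name is
hypothetical:

```java
import org.elasticsearch.xpack.esql.core.type.DataType;

final class ReturnTypeSketch {
    // A function that transforms a string value cannot preserve the original
    // field's analyzer, so a TEXT argument type is declared as KEYWORD output;
    // every other type passes through unchanged.
    static DataType returnTypeFor(DataType argumentType) {
        return argumentType.noText();
    }
}
```

Under this rule, `returnTypeFor(DataType.TEXT)` yields `DataType.KEYWORD`,
matching the `text | keyword` rows in the updated type tables in this change.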
--------- Co-authored-by: Luigi Dell'Aquila --- docs/changelog/114334.yaml | 7 +++ .../functions/kibana/definition/case.json | 52 +++++++++++++++- .../functions/kibana/definition/coalesce.json | 4 +- .../functions/kibana/definition/greatest.json | 4 +- .../functions/kibana/definition/least.json | 4 +- .../functions/kibana/definition/ltrim.json | 2 +- .../esql/functions/kibana/definition/max.json | 2 +- .../esql/functions/kibana/definition/min.json | 2 +- .../kibana/definition/mv_append.json | 2 +- .../kibana/definition/mv_dedupe.json | 2 +- .../functions/kibana/definition/mv_first.json | 2 +- .../functions/kibana/definition/mv_last.json | 2 +- .../functions/kibana/definition/mv_max.json | 2 +- .../functions/kibana/definition/mv_min.json | 2 +- .../functions/kibana/definition/mv_slice.json | 2 +- .../functions/kibana/definition/mv_sort.json | 2 +- .../functions/kibana/definition/reverse.json | 2 +- .../functions/kibana/definition/rtrim.json | 2 +- .../functions/kibana/definition/to_lower.json | 2 +- .../functions/kibana/definition/to_upper.json | 2 +- .../esql/functions/kibana/definition/top.json | 2 +- .../functions/kibana/definition/trim.json | 2 +- .../functions/kibana/definition/values.json | 2 +- .../esql/functions/kibana/inline_cast.json | 1 - .../esql/functions/types/case.asciidoc | 6 +- .../esql/functions/types/coalesce.asciidoc | 4 +- .../esql/functions/types/greatest.asciidoc | 4 +- .../esql/functions/types/least.asciidoc | 4 +- .../esql/functions/types/ltrim.asciidoc | 2 +- .../esql/functions/types/max.asciidoc | 2 +- .../esql/functions/types/min.asciidoc | 2 +- .../esql/functions/types/mv_append.asciidoc | 2 +- .../esql/functions/types/mv_dedupe.asciidoc | 2 +- .../esql/functions/types/mv_first.asciidoc | 2 +- .../esql/functions/types/mv_last.asciidoc | 2 +- .../esql/functions/types/mv_max.asciidoc | 2 +- .../esql/functions/types/mv_min.asciidoc | 2 +- .../esql/functions/types/mv_slice.asciidoc | 2 +- .../esql/functions/types/mv_sort.asciidoc | 2 +- .../esql/functions/types/reverse.asciidoc | 2 +- .../esql/functions/types/rtrim.asciidoc | 2 +- .../esql/functions/types/to_lower.asciidoc | 2 +- .../esql/functions/types/to_upper.asciidoc | 2 +- .../esql/functions/types/top.asciidoc | 2 +- .../esql/functions/types/trim.asciidoc | 2 +- .../esql/functions/types/values.asciidoc | 2 +- x-pack/plugin/build.gradle | 2 + .../xpack/esql/core/type/DataType.java | 4 ++ .../src/main/resources/convert.csv-spec | 6 +- .../src/main/resources/stats.csv-spec | 14 +++-- .../src/main/resources/stats_top.csv-spec | 6 +- .../src/main/resources/string.csv-spec | 3 +- .../xpack/esql/action/EsqlCapabilities.java | 5 ++ .../expression/function/aggregate/Max.java | 4 +- .../expression/function/aggregate/Min.java | 4 +- .../expression/function/aggregate/Top.java | 4 +- .../expression/function/aggregate/Values.java | 4 +- .../function/scalar/UnaryScalarFunction.java | 2 +- .../function/scalar/conditional/Case.java | 5 +- .../function/scalar/conditional/Greatest.java | 6 +- .../function/scalar/conditional/Least.java | 6 +- .../function/scalar/multivalue/MvAppend.java | 7 +-- .../function/scalar/multivalue/MvDedupe.java | 1 - .../function/scalar/multivalue/MvFirst.java | 1 - .../function/scalar/multivalue/MvLast.java | 1 - .../function/scalar/multivalue/MvMax.java | 2 +- .../function/scalar/multivalue/MvMin.java | 2 +- .../function/scalar/multivalue/MvSlice.java | 3 +- .../function/scalar/multivalue/MvSort.java | 4 +- .../function/scalar/nulls/Coalesce.java | 5 +- .../function/scalar/string/LTrim.java | 2 +- 
.../function/scalar/string/RTrim.java | 2 +- .../function/scalar/string/Reverse.java | 2 +- .../function/scalar/string/ToLower.java | 4 +- .../function/scalar/string/ToUpper.java | 4 +- .../function/scalar/string/Trim.java | 2 +- .../esql/type/EsqlDataTypeConverter.java | 1 - .../xpack/esql/analysis/ParsingTests.java | 3 - .../expression/function/TestCaseSupplier.java | 2 +- .../function/aggregate/MaxTests.java | 2 +- .../function/aggregate/MinTests.java | 2 +- .../scalar/conditional/CaseTests.java | 59 ++++++++++++++++++- .../function/scalar/string/ToLowerTests.java | 2 +- .../function/scalar/string/ToUpperTests.java | 2 +- .../rest-api-spec/test/esql/80_text.yml | 24 ++++++-- 85 files changed, 253 insertions(+), 123 deletions(-) create mode 100644 docs/changelog/114334.yaml diff --git a/docs/changelog/114334.yaml b/docs/changelog/114334.yaml new file mode 100644 index 0000000000000..d0fefe40c6970 --- /dev/null +++ b/docs/changelog/114334.yaml @@ -0,0 +1,7 @@ +pr: 114334 +summary: Don't return TEXT type for functions that take TEXT +area: ES|QL +type: bug +issues: + - 111537 + - 114333 diff --git a/docs/reference/esql/functions/kibana/definition/case.json b/docs/reference/esql/functions/kibana/definition/case.json index 1cf2c6ce7a579..bf498f690551c 100644 --- a/docs/reference/esql/functions/kibana/definition/case.json +++ b/docs/reference/esql/functions/kibana/definition/case.json @@ -424,6 +424,30 @@ "variadic" : true, "returnType" : "keyword" }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "keyword", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "text", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." + } + ], + "variadic" : true, + "returnType" : "keyword" + }, { "params" : [ { @@ -482,7 +506,31 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" + }, + { + "params" : [ + { + "name" : "condition", + "type" : "boolean", + "optional" : false, + "description" : "A condition." + }, + { + "name" : "trueValue", + "type" : "text", + "optional" : false, + "description" : "The value that's returned when the corresponding condition is the first to evaluate to `true`. The default value is returned when no condition matches." + }, + { + "name" : "elseValue", + "type" : "keyword", + "optional" : true, + "description" : "The value that's returned when no condition evaluates to `true`." 
+ } + ], + "variadic" : true, + "returnType" : "keyword" }, { "params" : [ @@ -506,7 +554,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/coalesce.json b/docs/reference/esql/functions/kibana/definition/coalesce.json index 9ebc5a97229cd..7f49195190951 100644 --- a/docs/reference/esql/functions/kibana/definition/coalesce.json +++ b/docs/reference/esql/functions/kibana/definition/coalesce.json @@ -242,7 +242,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ @@ -260,7 +260,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/greatest.json b/docs/reference/esql/functions/kibana/definition/greatest.json index 2818a5ac56339..eebb4fad1eb1d 100644 --- a/docs/reference/esql/functions/kibana/definition/greatest.json +++ b/docs/reference/esql/functions/kibana/definition/greatest.json @@ -189,7 +189,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ @@ -207,7 +207,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/least.json b/docs/reference/esql/functions/kibana/definition/least.json index 7b545896f4ddc..02fa58f92eaef 100644 --- a/docs/reference/esql/functions/kibana/definition/least.json +++ b/docs/reference/esql/functions/kibana/definition/least.json @@ -188,7 +188,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ @@ -206,7 +206,7 @@ } ], "variadic" : true, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/ltrim.json b/docs/reference/esql/functions/kibana/definition/ltrim.json index e85c2d42dedee..6d992b9db7b2c 100644 --- a/docs/reference/esql/functions/kibana/definition/ltrim.json +++ b/docs/reference/esql/functions/kibana/definition/ltrim.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/max.json b/docs/reference/esql/functions/kibana/definition/max.json index 09ca95a0afeff..45fd26571b091 100644 --- a/docs/reference/esql/functions/kibana/definition/max.json +++ b/docs/reference/esql/functions/kibana/definition/max.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/min.json b/docs/reference/esql/functions/kibana/definition/min.json index 3e87b3e9038e1..ae71fba049dbe 100644 --- a/docs/reference/esql/functions/kibana/definition/min.json +++ b/docs/reference/esql/functions/kibana/definition/min.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_append.json b/docs/reference/esql/functions/kibana/definition/mv_append.json index c14a3337a25a7..81c1b777be498 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_append.json +++ b/docs/reference/esql/functions/kibana/definition/mv_append.json @@ -218,7 +218,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json 
b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json index 9bb0935c6a5de..bfca58bc3e140 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_dedupe.json +++ b/docs/reference/esql/functions/kibana/definition/mv_dedupe.json @@ -147,7 +147,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_first.json b/docs/reference/esql/functions/kibana/definition/mv_first.json index 80e761faafab9..a2b6358023e4b 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_first.json +++ b/docs/reference/esql/functions/kibana/definition/mv_first.json @@ -146,7 +146,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_last.json b/docs/reference/esql/functions/kibana/definition/mv_last.json index fb16400f86e62..b6dc268af5305 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_last.json +++ b/docs/reference/esql/functions/kibana/definition/mv_last.json @@ -146,7 +146,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_max.json b/docs/reference/esql/functions/kibana/definition/mv_max.json index 17cdae8a3d39c..27d2b010dc02c 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_max.json +++ b/docs/reference/esql/functions/kibana/definition/mv_max.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_min.json b/docs/reference/esql/functions/kibana/definition/mv_min.json index 3718a0f6e1de5..410e97335687f 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_min.json +++ b/docs/reference/esql/functions/kibana/definition/mv_min.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_slice.json b/docs/reference/esql/functions/kibana/definition/mv_slice.json index 399a6145b040e..dbbfe0ffb5a78 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_slice.json +++ b/docs/reference/esql/functions/kibana/definition/mv_slice.json @@ -290,7 +290,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/mv_sort.json b/docs/reference/esql/functions/kibana/definition/mv_sort.json index c78ade7c8a94f..4cb255fb0afcb 100644 --- a/docs/reference/esql/functions/kibana/definition/mv_sort.json +++ b/docs/reference/esql/functions/kibana/definition/mv_sort.json @@ -146,7 +146,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/definition/reverse.json b/docs/reference/esql/functions/kibana/definition/reverse.json index 1b222691530f2..0652d9cfa6b15 100644 --- a/docs/reference/esql/functions/kibana/definition/reverse.json +++ b/docs/reference/esql/functions/kibana/definition/reverse.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/rtrim.json b/docs/reference/esql/functions/kibana/definition/rtrim.json index 028f442de9632..9c8a7578ed789 100644 --- 
a/docs/reference/esql/functions/kibana/definition/rtrim.json +++ b/docs/reference/esql/functions/kibana/definition/rtrim.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_lower.json b/docs/reference/esql/functions/kibana/definition/to_lower.json index f9b49a29a8c7d..07bb057fe080d 100644 --- a/docs/reference/esql/functions/kibana/definition/to_lower.json +++ b/docs/reference/esql/functions/kibana/definition/to_lower.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/to_upper.json b/docs/reference/esql/functions/kibana/definition/to_upper.json index edf36a982f56b..caa9d563b08b1 100644 --- a/docs/reference/esql/functions/kibana/definition/to_upper.json +++ b/docs/reference/esql/functions/kibana/definition/to_upper.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/top.json b/docs/reference/esql/functions/kibana/definition/top.json index 7fa4ff123eec7..82bd80636152c 100644 --- a/docs/reference/esql/functions/kibana/definition/top.json +++ b/docs/reference/esql/functions/kibana/definition/top.json @@ -194,7 +194,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/trim.json b/docs/reference/esql/functions/kibana/definition/trim.json index 6edf13e588e62..45805b3bfb054 100644 --- a/docs/reference/esql/functions/kibana/definition/trim.json +++ b/docs/reference/esql/functions/kibana/definition/trim.json @@ -26,7 +26,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" } ], "examples" : [ diff --git a/docs/reference/esql/functions/kibana/definition/values.json b/docs/reference/esql/functions/kibana/definition/values.json index e289173d9d989..ae69febd4f755 100644 --- a/docs/reference/esql/functions/kibana/definition/values.json +++ b/docs/reference/esql/functions/kibana/definition/values.json @@ -98,7 +98,7 @@ } ], "variadic" : false, - "returnType" : "text" + "returnType" : "keyword" }, { "params" : [ diff --git a/docs/reference/esql/functions/kibana/inline_cast.json b/docs/reference/esql/functions/kibana/inline_cast.json index 81a1966773238..9f663c8d0d6a3 100644 --- a/docs/reference/esql/functions/kibana/inline_cast.json +++ b/docs/reference/esql/functions/kibana/inline_cast.json @@ -15,7 +15,6 @@ "keyword" : "to_string", "long" : "to_long", "string" : "to_string", - "text" : "to_string", "time_duration" : "to_timeduration", "unsigned_long" : "to_unsigned_long", "version" : "to_version" diff --git a/docs/reference/esql/functions/types/case.asciidoc b/docs/reference/esql/functions/types/case.asciidoc index e8aa3eaf5daae..c6fb6a091e9d0 100644 --- a/docs/reference/esql/functions/types/case.asciidoc +++ b/docs/reference/esql/functions/types/case.asciidoc @@ -24,11 +24,13 @@ boolean | integer | | integer boolean | ip | ip | ip boolean | ip | | ip boolean | keyword | keyword | keyword +boolean | keyword | text | keyword boolean | keyword | | keyword boolean | long | long | long boolean | long | | long -boolean | text | text | text -boolean | text | | text +boolean | text | keyword | keyword +boolean | text | text | keyword +boolean | text | | keyword boolean | unsigned_long | 
unsigned_long | unsigned_long boolean | unsigned_long | | unsigned_long boolean | version | version | version diff --git a/docs/reference/esql/functions/types/coalesce.asciidoc b/docs/reference/esql/functions/types/coalesce.asciidoc index 368a12db0dca4..23a249494e0a2 100644 --- a/docs/reference/esql/functions/types/coalesce.asciidoc +++ b/docs/reference/esql/functions/types/coalesce.asciidoc @@ -19,7 +19,7 @@ keyword | keyword | keyword keyword | | keyword long | long | long long | | long -text | text | text -text | | text +text | text | keyword +text | | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/greatest.asciidoc b/docs/reference/esql/functions/types/greatest.asciidoc index 1454bbb6f81c1..7df77a6991315 100644 --- a/docs/reference/esql/functions/types/greatest.asciidoc +++ b/docs/reference/esql/functions/types/greatest.asciidoc @@ -16,7 +16,7 @@ keyword | keyword | keyword keyword | | keyword long | long | long long | | long -text | text | text -text | | text +text | text | keyword +text | | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/least.asciidoc b/docs/reference/esql/functions/types/least.asciidoc index 1454bbb6f81c1..7df77a6991315 100644 --- a/docs/reference/esql/functions/types/least.asciidoc +++ b/docs/reference/esql/functions/types/least.asciidoc @@ -16,7 +16,7 @@ keyword | keyword | keyword keyword | | keyword long | long | long long | | long -text | text | text -text | | text +text | text | keyword +text | | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/ltrim.asciidoc b/docs/reference/esql/functions/types/ltrim.asciidoc index 41d60049d59b8..1ba0e98ec8f09 100644 --- a/docs/reference/esql/functions/types/ltrim.asciidoc +++ b/docs/reference/esql/functions/types/ltrim.asciidoc @@ -6,5 +6,5 @@ |=== string | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/max.asciidoc b/docs/reference/esql/functions/types/max.asciidoc index 35ce5811e0cd0..564fb8dc3bfb0 100644 --- a/docs/reference/esql/functions/types/max.asciidoc +++ b/docs/reference/esql/functions/types/max.asciidoc @@ -12,6 +12,6 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword version | version |=== diff --git a/docs/reference/esql/functions/types/min.asciidoc b/docs/reference/esql/functions/types/min.asciidoc index 35ce5811e0cd0..564fb8dc3bfb0 100644 --- a/docs/reference/esql/functions/types/min.asciidoc +++ b/docs/reference/esql/functions/types/min.asciidoc @@ -12,6 +12,6 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword version | version |=== diff --git a/docs/reference/esql/functions/types/mv_append.asciidoc b/docs/reference/esql/functions/types/mv_append.asciidoc index a1894e429ae82..05f9ff6b19f9e 100644 --- a/docs/reference/esql/functions/types/mv_append.asciidoc +++ b/docs/reference/esql/functions/types/mv_append.asciidoc @@ -16,6 +16,6 @@ integer | integer | integer ip | ip | ip keyword | keyword | keyword long | long | long -text | text | text +text | text | keyword version | version | version |=== diff --git a/docs/reference/esql/functions/types/mv_dedupe.asciidoc b/docs/reference/esql/functions/types/mv_dedupe.asciidoc index 68e546451c8cb..976de79bb0910 100644 --- a/docs/reference/esql/functions/types/mv_dedupe.asciidoc +++ b/docs/reference/esql/functions/types/mv_dedupe.asciidoc @@ -16,6 +16,6 @@ integer | integer ip | ip keyword | keyword long | long 
-text | text +text | keyword version | version |=== diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc index 35633544d99a0..47736e76d1db4 100644 --- a/docs/reference/esql/functions/types/mv_first.asciidoc +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -16,7 +16,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc index 35633544d99a0..47736e76d1db4 100644 --- a/docs/reference/esql/functions/types/mv_last.asciidoc +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -16,7 +16,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_max.asciidoc b/docs/reference/esql/functions/types/mv_max.asciidoc index 8ea36aebbad37..d4e014554c86c 100644 --- a/docs/reference/esql/functions/types/mv_max.asciidoc +++ b/docs/reference/esql/functions/types/mv_max.asciidoc @@ -12,7 +12,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_min.asciidoc b/docs/reference/esql/functions/types/mv_min.asciidoc index 8ea36aebbad37..d4e014554c86c 100644 --- a/docs/reference/esql/functions/types/mv_min.asciidoc +++ b/docs/reference/esql/functions/types/mv_min.asciidoc @@ -12,7 +12,7 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword unsigned_long | unsigned_long version | version |=== diff --git a/docs/reference/esql/functions/types/mv_slice.asciidoc b/docs/reference/esql/functions/types/mv_slice.asciidoc index 0a9dc073370c7..60c1f6315a599 100644 --- a/docs/reference/esql/functions/types/mv_slice.asciidoc +++ b/docs/reference/esql/functions/types/mv_slice.asciidoc @@ -16,6 +16,6 @@ integer | integer | integer | integer ip | integer | integer | ip keyword | integer | integer | keyword long | integer | integer | long -text | integer | integer | text +text | integer | integer | keyword version | integer | integer | version |=== diff --git a/docs/reference/esql/functions/types/mv_sort.asciidoc b/docs/reference/esql/functions/types/mv_sort.asciidoc index 93965187482ac..c21ea5983945e 100644 --- a/docs/reference/esql/functions/types/mv_sort.asciidoc +++ b/docs/reference/esql/functions/types/mv_sort.asciidoc @@ -12,6 +12,6 @@ integer | keyword | integer ip | keyword | ip keyword | keyword | keyword long | keyword | long -text | keyword | text +text | keyword | keyword version | keyword | version |=== diff --git a/docs/reference/esql/functions/types/reverse.asciidoc b/docs/reference/esql/functions/types/reverse.asciidoc index 974066d225bca..9e5dc1c477316 100644 --- a/docs/reference/esql/functions/types/reverse.asciidoc +++ b/docs/reference/esql/functions/types/reverse.asciidoc @@ -6,5 +6,5 @@ |=== str | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/rtrim.asciidoc b/docs/reference/esql/functions/types/rtrim.asciidoc index 41d60049d59b8..1ba0e98ec8f09 100644 --- a/docs/reference/esql/functions/types/rtrim.asciidoc +++ b/docs/reference/esql/functions/types/rtrim.asciidoc @@ -6,5 +6,5 @@ |=== string | result keyword | keyword -text | text +text | keyword |=== 
diff --git a/docs/reference/esql/functions/types/to_lower.asciidoc b/docs/reference/esql/functions/types/to_lower.asciidoc index 974066d225bca..9e5dc1c477316 100644 --- a/docs/reference/esql/functions/types/to_lower.asciidoc +++ b/docs/reference/esql/functions/types/to_lower.asciidoc @@ -6,5 +6,5 @@ |=== str | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/to_upper.asciidoc b/docs/reference/esql/functions/types/to_upper.asciidoc index 974066d225bca..9e5dc1c477316 100644 --- a/docs/reference/esql/functions/types/to_upper.asciidoc +++ b/docs/reference/esql/functions/types/to_upper.asciidoc @@ -6,5 +6,5 @@ |=== str | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/top.asciidoc b/docs/reference/esql/functions/types/top.asciidoc index 25d7962a27252..699bc7b10ce84 100644 --- a/docs/reference/esql/functions/types/top.asciidoc +++ b/docs/reference/esql/functions/types/top.asciidoc @@ -12,5 +12,5 @@ integer | integer | keyword | integer ip | integer | keyword | ip keyword | integer | keyword | keyword long | integer | keyword | long -text | integer | keyword | text +text | integer | keyword | keyword |=== diff --git a/docs/reference/esql/functions/types/trim.asciidoc b/docs/reference/esql/functions/types/trim.asciidoc index 41d60049d59b8..1ba0e98ec8f09 100644 --- a/docs/reference/esql/functions/types/trim.asciidoc +++ b/docs/reference/esql/functions/types/trim.asciidoc @@ -6,5 +6,5 @@ |=== string | result keyword | keyword -text | text +text | keyword |=== diff --git a/docs/reference/esql/functions/types/values.asciidoc b/docs/reference/esql/functions/types/values.asciidoc index 35ce5811e0cd0..564fb8dc3bfb0 100644 --- a/docs/reference/esql/functions/types/values.asciidoc +++ b/docs/reference/esql/functions/types/values.asciidoc @@ -12,6 +12,6 @@ integer | integer ip | ip keyword | keyword long | long -text | text +text | keyword version | version |=== diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 8297ef5161fb0..cf6a8f51d1b81 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -84,5 +84,7 @@ tasks.named("yamlRestCompatTestTransform").configure({ task -> task.skipTest("security/10_forbidden/Test bulk response with invalid credentials", "warning does not exist for compatibility") task.skipTest("inference/inference_crud/Test get all", "Assertions on number of inference models break due to default configs") task.skipTest("esql/60_usage/Basic ESQL usage output (telemetry)", "The telemetry output changed. We dropped a column. That's safe.") + task.skipTest("esql/80_text/reverse text", "The output type changed from TEXT to KEYWORD.") + task.skipTest("esql/80_text/values function", "The output type changed from TEXT to KEYWORD.") }) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 5041c96128a1e..1b1eff8a07b1d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -584,6 +584,10 @@ static Builder builder() { return new Builder(); } + public DataType noText() { + return this == TEXT ? KEYWORD : this; + } + /** * Named parameters with default values. It's just easier to do this with * a builder in java.... 
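The `noText()` helper added to `DataType` just above is the single rule behind every `text | keyword` row in the generated type tables earlier in this series. Below is a minimal, self-contained sketch of its contract and of the two ways the later hunks use it: narrowing a function's declared return type, and treating `text`/`keyword` arguments as one family during type resolution. The enum is a stripped-down stand-in for illustration, not the real `DataType`.

    import java.util.List;

    enum DataType {
        BOOLEAN, KEYWORD, LONG, TEXT;

        // Mirrors the helper added in DataType.java above: TEXT collapses
        // to KEYWORD, every other type passes through unchanged.
        DataType noText() {
            return this == TEXT ? KEYWORD : this;
        }
    }

    class NoTextDemo {
        public static void main(String[] args) {
            // Return-type narrowing, as in the Max/Min/Top/Values dataType()
            // overrides: a function over a TEXT field advertises KEYWORD.
            System.out.println(DataType.TEXT.noText());    // KEYWORD
            System.out.println(DataType.KEYWORD.noText()); // KEYWORD
            System.out.println(DataType.LONG.noText());    // LONG

            // Text-insensitive resolution, as in Case/Greatest/Least/Coalesce:
            // TEXT and KEYWORD arguments resolve against the same expected type.
            DataType expected = DataType.TEXT.noText();
            for (DataType candidate : List.of(DataType.TEXT, DataType.KEYWORD)) {
                assert candidate.noText() == expected; // both are accepted
            }
        }
    }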
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec index 1397965145a1a..49960d1b5b0f3 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/convert.csv-spec @@ -58,11 +58,11 @@ ROW zero="0"::double convertToString required_capability: casting_operator -ROW one=1::keyword, two=2::text, three=3::string +ROW one=1::keyword, two=2::double, three=3::string ; - one:keyword | two:keyword | three:keyword -1 |2 |3 +one:keyword | two:double | three:keyword +1 | 2.0 | 3 ; convertToDatetime diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 2dc21a86e6394..80ba18b85a004 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -131,17 +131,19 @@ OPQS | OPQS | OPQS | ___ | small maxOfText required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 | stats max(name), a = max(name), b = max(x); -max(name):text | a:text | b:text -Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l +max(name):keyword | a:keyword | b:keyword +Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l ; maxOfTextGrouping required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 @@ -149,7 +151,7 @@ from airports | sort type asc | limit 4; -max(name):text | a:text | b:text | type:keyword +max(name):keyword| a:keyword | b:keyword | type:keyword Cheongju Int'l | Cheongju Int'l | Cheongju Int'l | major Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l | mid Zaporozhye Int'l | Zaporozhye Int'l | Zaporozhye Int'l | military @@ -211,17 +213,19 @@ LUH | LUH | LUH | ___ | small minOfText required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 | stats min(name), a = min(name), b = min(x); -min(name):text | a:text | b:text +min(name):keyword | a:keyword | b:keyword Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh ; minOfTextGrouping required_capability: agg_max_min_string_support +required_capability: functions_never_emit_text from airports | eval x = name | where scalerank >= 9 @@ -229,7 +233,7 @@ from airports | sort type asc | limit 4; -min(name):text | a:text | b:text | type:keyword +min(name):keyword | a:keyword | b:keyword | type:keyword Chandigarh Int'l | Chandigarh Int'l | Chandigarh Int'l | major Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh | mid Abdul Rachman Saleh | Abdul Rachman Saleh | Abdul Rachman Saleh | military diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec index 80d11425c5bb6..6eebb2f4d19da 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_top.csv-spec @@ -263,6 +263,7 @@ FROM employees topText required_capability: agg_top required_capability: agg_top_string_support +required_capability: functions_never_emit_text # we don't need MATCH, but the loader for books.csv is busted in CsvTests 
required_capability: match_operator @@ -273,13 +274,14 @@ FROM books calc = TOP(calc, 3, "asc"), evil = TOP(CASE(year < 1980, title, author), 3, "desc"); -title:text | calc:keyword | evil:text +title:keyword | calc:keyword | evil:keyword [Worlds of Exile and Illusion: Three Complete Novels of the Hainish Series in One Volume--Rocannon's World, Planet of Exile, City of Illusions, Woman-The Full Story: A Dynamic Celebration of Freedoms, Winter notes on summer impressions] | ["'Bria", "Gent", "HE UN"] | [William Faulkner, William Faulkner, William Faulkner] ; topTextGrouping required_capability: agg_top required_capability: agg_top_string_support +required_capability: functions_never_emit_text # we don't need MATCH, but the loader for books.csv is busted in CsvTests required_capability: match_operator @@ -293,7 +295,7 @@ FROM books | SORT author | LIMIT 3; - title:text | calc:keyword | evil:text | author:text + title:keyword | calc:keyword | evil:keyword | author:text A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | Tolk | A Tolkien Compass: Including J. R. R. Tolkien's Guide to the Names in The Lord of the Rings | Agnes Perkins The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1) | he Lo | [J. R. R. Tolkien, Alan Lee] | Alan Lee A Gentle Creature and Other Stories: White Nights, A Gentle Creature, and The Dream of a Ridiculous Man (The World's Classics) | Gent | [W. J. Leatherbarrow, Fyodor Dostoevsky, Alan Myers] | Alan Myers diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 00fa2fddb2106..305b8f3d8011e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -1289,6 +1289,7 @@ x:integer | y:string reverseWithTextFields required_capability: fn_reverse +required_capability: functions_never_emit_text FROM books | EVAL title_reversed = REVERSE(title), author_reversed_twice = REVERSE(REVERSE(author)), eq = author_reversed_twice == author | KEEP title, title_reversed, author, author_reversed_twice, eq, book_no @@ -1296,7 +1297,7 @@ FROM books | WHERE book_no IN ("1211", "1463") | LIMIT 2; -title:text | title_reversed:text | author:text | author_reversed_twice:text | eq:boolean | book_no:keyword +title:text | title_reversed:keyword | author:text | author_reversed_twice:keyword | eq:boolean | book_no:keyword The brothers Karamazov | vozamaraK srehtorb ehT | Fyodor Dostoevsky | Fyodor Dostoevsky | true | 1211 Realms of Tolkien: Images of Middle-earth | htrae-elddiM fo segamI :neikloT fo smlaeR | J. R. R. Tolkien | J. R. R. Tolkien | true | 1463 ; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 55236af648236..196a864db2c15 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -74,6 +74,11 @@ public enum Cap { */ FN_SUBSTRING_EMPTY_NULL, + /** + * All functions that take TEXT should never emit TEXT, only KEYWORD. #114334 + */ + FUNCTIONS_NEVER_EMIT_TEXT, + /** * Support for the {@code INLINESTATS} syntax. 
*/ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java index ee16193efdccc..ac2d4ff3cbc43 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Max.java @@ -55,7 +55,7 @@ public class Max extends AggregateFunction implements ToAggregator, SurrogateExp ); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "long", "version" }, description = "The maximum value of a field.", isAggregation = true, examples = { @@ -119,7 +119,7 @@ protected TypeResolution resolveType() { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java index 7aaa41ea6ab11..a5fc8196847b7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Min.java @@ -55,7 +55,7 @@ public class Min extends AggregateFunction implements ToAggregator, SurrogateExp ); @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text", "long", "version" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "long", "version" }, description = "The minimum value of a field.", isAggregation = true, examples = { @@ -119,7 +119,7 @@ protected TypeResolution resolveType() { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java index 4f81e0a897f9c..e0a7da806b3ac 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java @@ -51,7 +51,7 @@ public class Top extends AggregateFunction implements ToAggregator, SurrogateExp private static final String ORDER_DESC = "DESC"; @FunctionInfo( - returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword", "text" }, + returnType = { "boolean", "double", "integer", "long", "date", "ip", "keyword" }, description = "Collects the top values for a field. 
Includes repeated values.", isAggregation = true, examples = @Example(file = "stats_top", tag = "top") @@ -175,7 +175,7 @@ protected TypeResolution resolveType() { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java index 8d576839c3c5c..111eab051719b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Values.java @@ -52,7 +52,7 @@ public class Values extends AggregateFunction implements ToAggregator { ); @FunctionInfo( - returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "version" }, preview = true, description = "Returns all values in a group as a multivalued field. The order of the returned values isn't guaranteed. " + "If you need the values returned in order use <>.", @@ -105,7 +105,7 @@ public Values withFilter(Expression filter) { @Override public DataType dataType() { - return field().dataType(); + return field().dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java index 4d34033286f52..53b51f16d4183 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java @@ -164,6 +164,6 @@ public final Expression field() { @Override public DataType dataType() { - return field.dataType(); + return field.dataType().noText(); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java index d833a796cbecc..824f02ca7ccbb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java @@ -73,7 +73,6 @@ ConditionEvaluatorSupplier toEvaluator(ToEvaluator toEvaluator) { "ip", "keyword", "long", - "text", "unsigned_long", "version" }, description = """ @@ -195,12 +194,12 @@ protected TypeResolution resolveType() { private TypeResolution resolveValueType(Expression value, int position) { if (dataType == null || dataType == NULL) { - dataType = value.dataType(); + dataType = value.dataType().noText(); return TypeResolution.TYPE_RESOLVED; } return TypeResolutions.isType( value, - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java index 
aad2d37d414b8..abc2ea85198fa 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java @@ -43,7 +43,7 @@ public class Greatest extends EsqlScalarFunction implements OptionalArgument { private DataType dataType; @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" }, description = "Returns the maximum value from multiple columns. This is similar to <>\n" + "except it is intended to run on multiple columns at once.", note = "When run on `keyword` or `text` fields, this returns the last string in alphabetical order. " @@ -104,12 +104,12 @@ protected TypeResolution resolveType() { for (int position = 0; position < children().size(); position++) { Expression child = children().get(position); if (dataType == null || dataType == NULL) { - dataType = child.dataType(); + dataType = child.dataType().noText(); continue; } TypeResolution resolution = TypeResolutions.isType( child, - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java index 70ba9319385f3..a49fff0aa888b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java @@ -43,7 +43,7 @@ public class Least extends EsqlScalarFunction implements OptionalArgument { private DataType dataType; @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" }, description = "Returns the minimum value from multiple columns. 
" + "This is similar to <> except it is intended to run on multiple columns at once.", examples = @Example(file = "math", tag = "least") @@ -102,12 +102,12 @@ protected TypeResolution resolveType() { for (int position = 0; position < children().size(); position++) { Expression child = children().get(position); if (dataType == null || dataType == NULL) { - dataType = child.dataType(); + dataType = child.dataType().noText(); continue; } TypeResolution resolution = TypeResolutions.isType( child, - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java index 72d96a86d31eb..bcd6f4c30bf8a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppend.java @@ -62,7 +62,6 @@ public class MvAppend extends EsqlScalarFunction implements EvaluatorMapper { "ip", "keyword", "long", - "text", "version" }, description = "Concatenates values of two multi-value fields." ) @@ -134,12 +133,12 @@ protected TypeResolution resolveType() { if (resolution.unresolved()) { return resolution; } - dataType = field1.dataType(); + dataType = field1.dataType().noText(); if (dataType == DataType.NULL) { - dataType = field2.dataType(); + dataType = field2.dataType().noText(); return isType(field2, DataType::isRepresentable, sourceText(), SECOND, "representable"); } - return isType(field2, t -> t == dataType, sourceText(), SECOND, dataType.typeName()); + return isType(field2, t -> t.noText() == dataType, sourceText(), SECOND, dataType.typeName()); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java index 34b89b4f78997..9a2b041fafeb6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java @@ -46,7 +46,6 @@ public class MvDedupe extends AbstractMultivalueFunction { "ip", "keyword", "long", - "text", "version" }, description = "Remove duplicate values from a multivalued field.", note = "`MV_DEDUPE` may, but won't always, sort the values in the column.", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java index d5d203e7bb3d1..957c74883ffdf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java @@ -53,7 +53,6 @@ public class MvFirst extends AbstractMultivalueFunction { "ip", "keyword", "long", - "text", "unsigned_long", "version" }, description = """ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java index 21487f14817cd..fedbc1934d1be 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java @@ -53,7 +53,6 @@ public class MvLast extends AbstractMultivalueFunction { "ip", "keyword", "long", - "text", "unsigned_long", "version" }, description = """ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java index 6a53c652d3420..5386a9e3ef763 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java @@ -36,7 +36,7 @@ public class MvMax extends AbstractMultivalueFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvMax", MvMax::new); @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "unsigned_long", "version" }, description = "Converts a multivalued expression into a single valued column containing the maximum value.", examples = { @Example(file = "math", tag = "mv_max"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java index 4cc83c99b2c08..a2b3c53f322ba 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java @@ -36,7 +36,7 @@ public class MvMin extends AbstractMultivalueFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "MvMin", MvMin::new); @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "unsigned_long", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "unsigned_long", "version" }, description = "Converts a multivalued expression into a single valued column containing the minimum value.", examples = { @Example(file = "math", tag = "mv_min"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java index ef562c339dfd9..f4f9679dc3704 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java @@ -67,7 +67,6 @@ public class MvSlice extends EsqlScalarFunction implements OptionalArgument, Eva "ip", "keyword", "long", - "text", "version" }, description = """ Returns 
a subset of the multivalued field using the start and end index values. @@ -240,7 +239,7 @@ protected NodeInfo info() { @Override public DataType dataType() { - return field.dataType(); + return field.dataType().noText(); } static int adjustIndex(int oldOffset, int fieldValueCount, int first) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java index 5ca5618bf2a54..2286a1357ced8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java @@ -69,7 +69,7 @@ public class MvSort extends EsqlScalarFunction implements OptionalArgument, Vali private static final String INVALID_ORDER_ERROR = "Invalid order value in [{}], expected one of [{}, {}] but got [{}]"; @FunctionInfo( - returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "version" }, description = "Sorts a multivalued field in lexicographical order.", examples = @Example(file = "ints", tag = "mv_sort") ) @@ -226,7 +226,7 @@ protected NodeInfo info() { @Override public DataType dataType() { - return field.dataType(); + return field.dataType().noText(); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java index 6b9c8d0da025b..52686430ca5b5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java @@ -61,7 +61,6 @@ public class Coalesce extends EsqlScalarFunction implements OptionalArgument { "ip", "keyword", "long", - "text", "version" }, description = "Returns the first of its arguments that is not null. 
If all arguments are null, it returns `null`.", examples = { @Example(file = "null", tag = "coalesce") } @@ -145,12 +144,12 @@ protected TypeResolution resolveType() { for (int position = 0; position < children().size(); position++) { if (dataType == null || dataType == NULL) { - dataType = children().get(position).dataType(); + dataType = children().get(position).dataType().noText(); continue; } TypeResolution resolution = TypeResolutions.isType( children().get(position), - t -> t == dataType, + t -> t.noText() == dataType, sourceText(), TypeResolutions.ParamOrdinal.fromIndex(position), dataType.typeName() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java index 8a4a5f4d841a5..0b7233f10b454 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrim.java @@ -34,7 +34,7 @@ public class LTrim extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "LTrim", LTrim::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Removes leading whitespaces from a string.", examples = @Example(file = "string", tag = "ltrim") ) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java index b79e1adf99a20..80809a444f5e8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrim.java @@ -34,7 +34,7 @@ public class RTrim extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "RTrim", RTrim::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Removes trailing whitespaces from a string.", examples = @Example(file = "string", tag = "rtrim") ) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java index e161566838cd9..02787999f24f7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Reverse.java @@ -37,7 +37,7 @@ public class Reverse extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Reverse", Reverse::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Returns a new string representing the input string in reverse order.", examples = { @Example(file = "string", tag = "reverse"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java index c475469488d7b..5f2bbcde52166 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java @@ -39,7 +39,7 @@ public class ToLower extends EsqlConfigurationFunction { private final Expression field; @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Returns a new string representing the input string converted to lower case.", examples = @Example(file = "string", tag = "to_lower") ) @@ -72,7 +72,7 @@ public String getWriteableName() { @Override public DataType dataType() { - return field.dataType(); + return DataType.KEYWORD; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java index 1b5084a7916ef..7fdd5e39f96f3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java @@ -39,7 +39,7 @@ public class ToUpper extends EsqlConfigurationFunction { private final Expression field; @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Returns a new string representing the input string converted to upper case.", examples = @Example(file = "string", tag = "to_upper") ) @@ -72,7 +72,7 @@ public String getWriteableName() { @Override public DataType dataType() { - return field.dataType(); + return DataType.KEYWORD; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java index 1fe7529caa2da..ef0afc3a4e6cb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Trim.java @@ -34,7 +34,7 @@ public final class Trim extends UnaryScalarFunction { public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Trim", Trim::new); @FunctionInfo( - returnType = { "keyword", "text" }, + returnType = { "keyword" }, description = "Removes leading and trailing whitespaces from a string.", examples = @Example(file = "string", tag = "trim") ) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index edc3081a33681..05a658ec411f3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -117,7 +117,6 @@ public class EsqlDataTypeConverter { entry(LONG, ToLong::new), // ToRadians, typeless entry(KEYWORD, ToString::new), - entry(TEXT, ToString::new), entry(UNSIGNED_LONG, ToUnsignedLong::new), entry(VERSION, ToVersion::new), entry(DATE_PERIOD, ToDatePeriod::new), diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java index 8867e7425a92e..3cafd42b731f6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java @@ -88,9 +88,6 @@ public void testInlineCast() throws IOException { Collections.sort(namesAndAliases); for (String nameOrAlias : namesAndAliases) { DataType expectedType = DataType.fromNameOrAlias(nameOrAlias); - if (expectedType == DataType.TEXT) { - expectedType = DataType.KEYWORD; - } if (EsqlDataTypeConverter.converterFunctionFactory(expectedType) == null) { continue; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index 2ba175657b6c2..c12e0a8684ba9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -1435,7 +1435,7 @@ public static TestCase typeError(List data, String expectedTypeError) this.source = Source.EMPTY; this.data = data; this.evaluatorToString = evaluatorToString; - this.expectedType = expectedType; + this.expectedType = expectedType == null ? null : expectedType.noText(); @SuppressWarnings("unchecked") Matcher downcast = (Matcher) matcher; this.matcher = downcast; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java index ce2bf7e262ae9..9756804a1ec0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MaxTests.java @@ -128,7 +128,7 @@ public static Iterable parameters() { return new TestCaseSupplier.TestCase( List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.TEXT, "field")), "Max[field=Attribute[channel=0]]", - DataType.TEXT, + DataType.KEYWORD, equalTo(value) ); }), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java index 7250072cd2003..171181496c889 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/MinTests.java @@ -128,7 +128,7 @@ public static Iterable parameters() { return new TestCaseSupplier.TestCase( List.of(TestCaseSupplier.TypedData.multiRow(List.of(value), DataType.TEXT, "field")), "Min[field=Attribute[channel=0]]", - DataType.TEXT, + DataType.KEYWORD, equalTo(value) ); }), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index fbb7c691b1d94..51b1c72c6e287 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -151,6 +151,33 @@ private static void twoAndThreeArgs( return testCase(type, typedData, lhsOrRhs ? lhs : rhs, toStringMatcher(1, false), false, null, addWarnings(warnings)); }) ); + if (type.noText() == DataType.KEYWORD) { + DataType otherType = type == DataType.KEYWORD ? DataType.TEXT : DataType.KEYWORD; + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(cond, type, otherType)), + List.of(DataType.BOOLEAN, type, otherType), + () -> { + Object lhs = randomLiteral(type).value(); + Object rhs = randomLiteral(otherType).value(); + List typedData = List.of( + cond(cond, "cond"), + new TestCaseSupplier.TypedData(lhs, type, "lhs"), + new TestCaseSupplier.TypedData(rhs, otherType, "rhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? lhs : rhs, + toStringMatcher(1, false), + false, + null, + addWarnings(warnings) + ); + } + ) + ); + } if (lhsOrRhs) { suppliers.add( new TestCaseSupplier( @@ -222,7 +249,6 @@ private static void twoAndThreeArgs( ) ); } - suppliers.add( new TestCaseSupplier( "partial foldable " + TestCaseSupplier.nameFrom(Arrays.asList(cond, type, type)), @@ -292,6 +318,33 @@ private static void twoAndThreeArgs( } ) ); + if (type.noText() == DataType.KEYWORD) { + DataType otherType = type == DataType.KEYWORD ? DataType.TEXT : DataType.KEYWORD; + suppliers.add( + new TestCaseSupplier( + TestCaseSupplier.nameFrom(Arrays.asList(DataType.NULL, type, otherType)), + List.of(DataType.NULL, type, otherType), + () -> { + Object lhs = randomLiteral(type).value(); + Object rhs = randomLiteral(otherType).value(); + List typedData = List.of( + new TestCaseSupplier.TypedData(null, DataType.NULL, "cond"), + new TestCaseSupplier.TypedData(lhs, type, "lhs"), + new TestCaseSupplier.TypedData(rhs, otherType, "rhs") + ); + return testCase( + type, + typedData, + lhsOrRhs ? 
lhs : rhs, + startsWith("CaseEagerEvaluator[conditions=[ConditionEvaluator[condition="), + false, + null, + addWarnings(warnings) + ); + } + ) + ); + } } suppliers.add( new TestCaseSupplier( @@ -804,7 +857,7 @@ private static String typeErrorMessage(boolean includeOrdinal, List ty if (types.get(0) != DataType.BOOLEAN && types.get(0) != DataType.NULL) { return typeErrorMessage(includeOrdinal, types, 0, "boolean"); } - DataType mainType = types.get(1); + DataType mainType = types.get(1).noText(); for (int i = 2; i < types.size(); i++) { if (i % 2 == 0 && i != types.size() - 1) { // condition @@ -813,7 +866,7 @@ private static String typeErrorMessage(boolean includeOrdinal, List ty } } else { // value - if (types.get(i) != mainType) { + if (types.get(i).noText() != mainType) { return typeErrorMessage(includeOrdinal, types, i, mainType.typeName()); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java index 7af1c180fd7b9..1f564ecb87f1e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -47,7 +47,7 @@ public static Iterable parameters() { suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter - return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers, (v, p) -> "string"); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "string"); } public void testRandomLocale() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java index c8bbe03bde411..7c136c3bb83c2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -47,7 +47,7 @@ public static Iterable parameters() { suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter - return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers, (v, p) -> "string"); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers, (v, p) -> "string"); } public void testRandomLocale() { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml index 88ef03a22d70c..55bd39bdd73cc 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/80_text.yml @@ -392,7 +392,7 @@ setup: - method: POST path: /_query parameters: [method, path, parameters, capabilities] - capabilities: [fn_reverse] + capabilities: [fn_reverse, functions_never_emit_text] reason: "reverse not yet added" - do: allowed_warnings_regex: @@ -402,10 +402,10 @@ setup: query: 'FROM test | SORT name | EVAL job_reversed = REVERSE(job), tag_reversed = REVERSE(tag) | KEEP job_reversed, tag_reversed' - match: { columns.0.name: 
"job_reversed" } - - match: { columns.0.type: "text" } + - match: { columns.0.type: "keyword" } - match: { columns.1.name: "tag_reversed" } - - match: { columns.1.type: "text" } + - match: { columns.1.type: "keyword" } - length: { values: 2 } - match: { values.0: [ "rotceriD TI", "rab oof" ] } @@ -573,7 +573,6 @@ setup: body: query: 'FROM test | STATS job = VALUES(job) | EVAL job = MV_SORT(job) | LIMIT 1' - match: { columns.0.name: "job" } - - match: { columns.0.type: "text" } - length: { values: 1 } - match: { values.0: [ [ "IT Director", "Payroll Specialist" ] ] } @@ -592,7 +591,22 @@ setup: - match: { columns.0.name: "tag" } - match: { columns.0.type: "text" } - match: { columns.1.name: "job" } - - match: { columns.1.type: "text" } - length: { values: 2 } - match: { values.0: [ "baz", [ "Other", "Payroll Specialist" ] ] } - match: { values.1: [ "foo bar", "IT Director" ] } + +--- +"remove text typecast": + - requires: + capabilities: + - method: POST + path: /_query + parameters: [ method, path, parameters, capabilities ] + capabilities: [ functions_never_emit_text ] + reason: "Disabling ::text was done in 8.17 as part of removing all possibilities to emit text" + + - do: + catch: /Unsupported conversion to type \[TEXT\]/ + esql.query: + body: + query: 'FROM test | EVAL tag = name::text | KEEP name' From 16f61b460033baed6e7ae725fad96860d7a7f5e5 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Fri, 25 Oct 2024 10:15:56 +0200 Subject: [PATCH 50/60] Increase assert timeout for DeprecationHttpIT to reduce risk of failing when test cluster is slow to warm up (fixes #115179) (#115621) --- .../xpack/deprecation/DeprecationHttpIT.java | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java index 3fb9573dd7b62..4a17c2abbd797 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java +++ b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/DeprecationHttpIT.java @@ -121,7 +121,7 @@ public void testDeprecatedSettingsReturnWarnings() throws Exception { List> documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId()); logger.warn(documents); assertThat(documents, hasSize(2)); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } finally { Response response = cleanupSettings(); List warningHeaders = getWarningHeaders(response.getHeaders()); @@ -245,7 +245,7 @@ private void doTestDeprecationWarningsAppearInHeaders(String xOpaqueId) throws E var documents = DeprecationTestUtils.getIndexedDeprecations(client(), xOpaqueId); logger.warn(documents); assertThat(documents, hasSize(headerMatchers.size())); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } public void testDeprecationRouteThrottling() throws Exception { @@ -275,7 +275,7 @@ public void testDeprecationRouteThrottling() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -303,7 +303,7 @@ public void testDisableDeprecationLogIndexing() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } finally { configureWriteDeprecationLogsToIndex(null); } @@ -369,7 +369,7 @@ public void testDeprecationMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, 
TimeUnit.SECONDS); } @@ -414,7 +414,7 @@ public void testDeprecationCriticalWarnMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -473,7 +473,7 @@ public void testDeprecationWarnMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -504,7 +504,7 @@ public void testDeprecateAndKeep() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } public void testReplacesInCurrentVersion() throws Exception { @@ -534,7 +534,7 @@ public void testReplacesInCurrentVersion() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } public void testReplacesInCompatibleVersion() throws Exception { @@ -579,7 +579,7 @@ public void testReplacesInCompatibleVersion() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } /** @@ -649,7 +649,7 @@ public void testCompatibleMessagesCanBeIndexed() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } @@ -690,7 +690,7 @@ public void testDeprecationIndexingCacheReset() throws Exception { ) ) ); - }, 30, TimeUnit.SECONDS); + }, 45, TimeUnit.SECONDS); } From 9394e88c0f00e58e6b49e7607fb70bde119e4e1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Fri, 25 Oct 2024 10:18:01 +0200 Subject: [PATCH 51/60] [DOCS] Updates inference processor docs. (#115566) --- docs/reference/ingest/processors/inference.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/ingest/processors/inference.asciidoc b/docs/reference/ingest/processors/inference.asciidoc index 4699f634afe37..9c6f0592a1d91 100644 --- a/docs/reference/ingest/processors/inference.asciidoc +++ b/docs/reference/ingest/processors/inference.asciidoc @@ -16,7 +16,7 @@ ingested in the pipeline. [options="header"] |====== | Name | Required | Default | Description -| `model_id` . | yes | - | (String) The ID or alias for the trained model, or the ID of the deployment. +| `model_id` . | yes | - | (String) An inference ID, a model deployment ID, a trained model ID or an alias. | `input_output` | no | - | (List) Input fields for {infer} and output (destination) fields for the {infer} results. This option is incompatible with the `target_field` and `field_map` options. | `target_field` | no | `ml.inference.` | (String) Field added to incoming documents to contain results objects. | `field_map` | no | If defined the model's default field map | (Object) Maps the document field names to the known field names of the model. This mapping takes precedence over any default mappings provided in the model configuration. 
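The DeprecationHttpIT patch above only widens each `assertBusy` deadline from 30 to 45 seconds. For context, here is a simplified stand-in for the polling loop that helper implements; the real method lives in the test framework's `ESTestCase`, and this reconstruction only approximates it.

    import java.util.concurrent.TimeUnit;

    final class AssertBusySketch {
        // Retry an assertion until it holds or the deadline passes, so tests
        // tolerate a cluster that is slow to warm up; raising the timeout to
        // 45 seconds simply pushes the deadline out.
        static void assertBusy(Runnable assertion, long timeout, TimeUnit unit) throws InterruptedException {
            long deadline = System.nanoTime() + unit.toNanos(timeout);
            long sleepMillis = 1;
            while (true) {
                try {
                    assertion.run();
                    return; // assertion finally held
                } catch (AssertionError e) {
                    if (System.nanoTime() >= deadline) {
                        throw e; // still failing at the deadline: surface it
                    }
                    Thread.sleep(sleepMillis);
                    sleepMillis = Math.min(sleepMillis * 2, 1_000); // back off
                }
            }
        }

        public static void main(String[] args) throws InterruptedException {
            long start = System.currentTimeMillis();
            // Passes once ~100ms have elapsed, mimicking an index that needs
            // a moment before the expected deprecation documents appear.
            assertBusy(() -> {
                if (System.currentTimeMillis() - start < 100) {
                    throw new AssertionError("documents not indexed yet");
                }
            }, 45, TimeUnit.SECONDS);
        }
    }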
From 11401a35d41c723e98c0dcc09f4874c9c842d349 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 19:45:39 +1100 Subject: [PATCH 52/60] Mute org.elasticsearch.oldrepos.OldRepositoryAccessIT testOldRepoAccess #115631 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 084bf27d6a11b..5c94c0aff60b6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -279,6 +279,9 @@ tests: - class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT method: test {yaml=indices.create/10_basic/Create lookup index} issue: https://github.com/elastic/elasticsearch/issues/115605 +- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT + method: testOldRepoAccess + issue: https://github.com/elastic/elasticsearch/issues/115631 # Examples: # From 452ca351d3d0887db96c124dd83bb755e6e5894f Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:19:31 +0200 Subject: [PATCH 53/60] [DOCS] Test trivial commit (#115579) (#115628) (cherry picked from commit e642dd84815ea476d1e7b99f26f65cb5099d4e39) --- .../search-your-data/search-application-overview.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/search-your-data/search-application-overview.asciidoc b/docs/reference/search/search-your-data/search-application-overview.asciidoc index e12b55911740b..13cc97bb8aeab 100644 --- a/docs/reference/search/search-your-data/search-application-overview.asciidoc +++ b/docs/reference/search/search-your-data/search-application-overview.asciidoc @@ -74,7 +74,7 @@ To create a new search application in {kib}: . Name your search application. . Select *Create*. -Your search application should now be available in the list of search applications. +Your search application should now be available in the list. 
//[.screenshot] // image::../../images/search-applications/search-applications-create.png[Create search application screen] From b83042aa432776e4e1bcfe5c3c2f17ff2467a5e5 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Fri, 25 Oct 2024 20:30:37 +1100 Subject: [PATCH 54/60] Mute org.elasticsearch.xpack.esql.analysis.AnalyzerTests testMvAppendValidation #115636 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml index 5c94c0aff60b6..4869b669f6220 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,6 +282,9 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 +- class: org.elasticsearch.xpack.esql.analysis.AnalyzerTests + method: testMvAppendValidation + issue: https://github.com/elastic/elasticsearch/issues/115636 # Examples: # From f1de84b51cf753e2bd1e381c0a6858797229b233 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:39:52 +0200 Subject: [PATCH 55/60] [DOCS] Fix casing in servicenow docs config (#115634) --- .../connector/docs/connectors-servicenow.asciidoc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/reference/connector/docs/connectors-servicenow.asciidoc b/docs/reference/connector/docs/connectors-servicenow.asciidoc index 089a3b405d8a5..a02c418f11d74 100644 --- a/docs/reference/connector/docs/connectors-servicenow.asciidoc +++ b/docs/reference/connector/docs/connectors-servicenow.asciidoc @@ -81,7 +81,7 @@ Comma-separated list of services to fetch data from ServiceNow. If the value is - link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/incident-management/concept/c_IncidentManagement.html[Incident] - link:https://docs.servicenow.com/bundle/tokyo-servicenow-platform/page/use/service-catalog-requests/task/t_AddNewRequestItems.html[Requested Item] - link:https://docs.servicenow.com/bundle/tokyo-customer-service-management/page/product/customer-service-management/task/t_SearchTheKnowledgeBase.html[Knowledge] -- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change Request] +- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change request] + [NOTE] ==== @@ -89,7 +89,7 @@ If you have configured a custom service, the `*` value will not fetch data from ==== Default value is `*`. Examples: + - - `User, Incident, Requested Item, Knowledge, Change Request` + - `User, Incident, Requested Item, Knowledge, Change request` - `*` Enable document level security:: @@ -139,7 +139,7 @@ For default services, connectors use the following roles to find users who have | Knowledge | `admin`, `knowledge`, `knowledge_manager`, `knowledge_admin` -| Change Request | `admin`, `sn_change_read`, `itil` +| Change request | `admin`, `sn_change_read`, `itil` |=== For services other than these defaults, the connector iterates over access controls with `read` operations and finds the respective roles for those services. @@ -305,7 +305,7 @@ Comma-separated list of services to fetch data from ServiceNow. 
If the value is - link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/incident-management/concept/c_IncidentManagement.html[Incident] - link:https://docs.servicenow.com/bundle/tokyo-servicenow-platform/page/use/service-catalog-requests/task/t_AddNewRequestItems.html[Requested Item] - link:https://docs.servicenow.com/bundle/tokyo-customer-service-management/page/product/customer-service-management/task/t_SearchTheKnowledgeBase.html[Knowledge] -- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change Request] +- link:https://docs.servicenow.com/bundle/tokyo-it-service-management/page/product/change-management/task/t_CreateAChange.html[Change request] + [NOTE] ==== @@ -313,7 +313,7 @@ If you have configured a custom service, the `*` value will not fetch data from ==== Default value is `*`. Examples: + - - `User, Incident, Requested Item, Knowledge, Change Request` + - `User, Incident, Requested Item, Knowledge, Change request` - `*` `retry_count`:: @@ -374,7 +374,7 @@ For default services, connectors use the following roles to find users who have | Knowledge | `admin`, `knowledge`, `knowledge_manager`, `knowledge_admin` -| Change Request | `admin`, `sn_change_read`, `itil` +| Change request | `admin`, `sn_change_read`, `itil` |=== For services other than these defaults, the connector iterates over access controls with `read` operations and finds the respective roles for those services. From 2d854768bc98b34bd4ea8217aced2e1d95140aef Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 12:37:38 +0200 Subject: [PATCH 56/60] Optimize threading in AbstractSearchAsyncAction (#113230) Forking when an action completes on the current thread is needlessly heavy-handed in preventing stack overflows. Also, we don't need locking/synchronization to deal with a worker-count + queue-length problem. Both of these allow for non-trivial optimization even in the current execution model; this change also helps with moving to a more efficient execution model by saving needless forking to the search pool in particular. -> refactored the code to never fork but instead avoid stack-depth issues through use of a `SubscribableListener` -> replaced our home-brew queue and semaphore combination with JDK primitives, which saves blocking synchronization on task start and completion. 
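Before the diff, a minimal, self-contained sketch of the throttling pattern this change introduces may help. It is an illustration under stated assumptions, not the patch's code: `ThrottledExecutions` is a hypothetical stand-in for the `PendingExecutions` class in the diff below, the `Runnable` completion callback stands in for the `Releasable` handed to tasks in the real code, `CompletableFuture` stands in for Elasticsearch's `SubscribableListener`, and failure handling is elided.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.Semaphore;
import java.util.function.Consumer;

final class ThrottledExecutions {
    private final Semaphore permits;          // bounds the number of in-flight tasks
    private final LinkedTransferQueue<Consumer<Runnable>> queue = new LinkedTransferQueue<>();

    ThrottledExecutions(int maxConcurrent) {
        this.permits = new Semaphore(maxConcurrent);
    }

    // Each task receives a callback it must invoke exactly once when it completes.
    void submit(Consumer<Runnable> task) {
        if (permits.tryAcquire()) {
            executeAndRelease(task);
        } else {
            queue.add(task);
            // Re-check: a permit may have been freed after the task was enqueued.
            if (permits.tryAcquire()) {
                Consumer<Runnable> next = pollNextTaskOrReleasePermit();
                if (next != null) {
                    executeAndRelease(next);
                }
            }
        }
    }

    private void executeAndRelease(Consumer<Runnable> task) {
        while (task != null) {                      // iterate, never recurse
            CompletableFuture<Void> done = new CompletableFuture<>();
            task.accept(() -> done.complete(null)); // hand the task its completion callback
            if (done.isDone()) {
                // Completed synchronously: keep draining on this thread, constant stack depth.
                task = pollNextTaskOrReleasePermit();
            } else {
                // Will complete later on another thread: resume draining there.
                done.thenRun(() -> {
                    Consumer<Runnable> next = pollNextTaskOrReleasePermit();
                    if (next != null) {
                        executeAndRelease(next);
                    }
                });
                return;
            }
        }
    }

    private Consumer<Runnable> pollNextTaskOrReleasePermit() {
        Consumer<Runnable> next = queue.poll();
        if (next == null) {
            permits.release();                      // nothing queued: hand the permit back
        }
        return next;
    }
}

The load-bearing detail is the `isDone()` check immediately after invoking the task: synchronous completions are drained in a loop on the current thread with no forking and no growing stack, while asynchronous completions re-enter the drain loop on whichever thread signals completion, which is what lets the patch delete the `fork(...)` helper entirely.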
--- .../search/AbstractSearchAsyncAction.java | 220 ++++++++---------- 1 file changed, 94 insertions(+), 126 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 0c585c705dcd0..cf25c5730d341 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -20,14 +20,13 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.shard.ShardId; @@ -43,7 +42,6 @@ import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.transport.Transport; -import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -51,9 +49,12 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; +import java.util.concurrent.LinkedTransferQueue; +import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; +import java.util.function.Consumer; import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; @@ -238,7 +239,12 @@ public final void run() { assert shardRoutings.skip() == false; assert shardIndexMap.containsKey(shardRoutings); int shardIndex = shardIndexMap.get(shardRoutings); - performPhaseOnShard(shardIndex, shardRoutings, shardRoutings.nextOrNull()); + final SearchShardTarget routing = shardRoutings.nextOrNull(); + if (routing == null) { + failOnUnavailable(shardIndex, shardRoutings); + } else { + performPhaseOnShard(shardIndex, shardRoutings, routing); + } } } } @@ -258,7 +264,7 @@ private static boolean assertExecuteOnStartThread() { int index = 0; assert stackTraceElements[index++].getMethodName().equals("getStackTrace"); assert stackTraceElements[index++].getMethodName().equals("assertExecuteOnStartThread"); - assert stackTraceElements[index++].getMethodName().equals("performPhaseOnShard"); + assert stackTraceElements[index++].getMethodName().equals("failOnUnavailable"); if (stackTraceElements[index].getMethodName().equals("performPhaseOnShard")) { assert stackTraceElements[index].getClassName().endsWith("CanMatchPreFilterSearchPhase"); index++; @@ -277,65 +283,53 @@ private static boolean assertExecuteOnStartThread() { } protected void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final SearchShardTarget shard) { - /* - * We capture the thread that this phase is starting on. 
When we are called back after executing the phase, we are either on the - * same thread (because we never went async, or the same thread was selected from the thread pool) or a different thread. If we - * continue on the same thread in the case that we never went async and this happens repeatedly we will end up recursing deeply and - * could stack overflow. To prevent this, we fork if we are called back on the same thread that execution started on and otherwise - * we can continue (cf. InitialSearchPhase#maybeFork). - */ - if (shard == null) { - assert assertExecuteOnStartThread(); - SearchShardTarget unassignedShard = new SearchShardTarget(null, shardIt.shardId(), shardIt.getClusterAlias()); - onShardFailure(shardIndex, unassignedShard, shardIt, new NoShardAvailableActionException(shardIt.shardId())); + if (throttleConcurrentRequests) { + var pendingExecutions = pendingExecutionsPerNode.computeIfAbsent( + shard.getNodeId(), + n -> new PendingExecutions(maxConcurrentRequestsPerNode) + ); + pendingExecutions.submit(l -> doPerformPhaseOnShard(shardIndex, shardIt, shard, l)); } else { - final PendingExecutions pendingExecutions = throttleConcurrentRequests - ? pendingExecutionsPerNode.computeIfAbsent(shard.getNodeId(), n -> new PendingExecutions(maxConcurrentRequestsPerNode)) - : null; - Runnable r = () -> { - final Thread thread = Thread.currentThread(); - try { - executePhaseOnShard(shardIt, shard, new SearchActionListener<>(shard, shardIndex) { - @Override - public void innerOnResponse(Result result) { - try { - onShardResult(result, shardIt); - } catch (Exception exc) { - onShardFailure(shardIndex, shard, shardIt, exc); - } finally { - executeNext(pendingExecutions, thread); - } - } + doPerformPhaseOnShard(shardIndex, shardIt, shard, () -> {}); + } + } - @Override - public void onFailure(Exception t) { - try { - onShardFailure(shardIndex, shard, shardIt, t); - } finally { - executeNext(pendingExecutions, thread); - } - } - }); - } catch (final Exception e) { - try { - /* - * It is possible to run into connection exceptions here because we are getting the connection early and might - * run into nodes that are not connected. In this case, on shard failure will move us to the next shard copy. - */ - fork(() -> onShardFailure(shardIndex, shard, shardIt, e)); - } finally { - executeNext(pendingExecutions, thread); + private void doPerformPhaseOnShard(int shardIndex, SearchShardIterator shardIt, SearchShardTarget shard, Releasable releasable) { + try { + executePhaseOnShard(shardIt, shard, new SearchActionListener<>(shard, shardIndex) { + @Override + public void innerOnResponse(Result result) { + try (releasable) { + onShardResult(result, shardIt); + } catch (Exception exc) { + onShardFailure(shardIndex, shard, shardIt, exc); } } - }; - if (throttleConcurrentRequests) { - pendingExecutions.tryRun(r); - } else { - r.run(); + + @Override + public void onFailure(Exception e) { + try (releasable) { + onShardFailure(shardIndex, shard, shardIt, e); + } + } + }); + } catch (final Exception e) { + /* + * It is possible to run into connection exceptions here because we are getting the connection early and might + * run into nodes that are not connected. In this case, on shard failure will move us to the next shard copy. 
+ */ + try (releasable) { + onShardFailure(shardIndex, shard, shardIt, e); } } } + private void failOnUnavailable(int shardIndex, SearchShardIterator shardIt) { + assert assertExecuteOnStartThread(); + SearchShardTarget unassignedShard = new SearchShardTarget(null, shardIt.shardId(), shardIt.getClusterAlias()); + onShardFailure(shardIndex, unassignedShard, shardIt, new NoShardAvailableActionException(shardIt.shardId())); + } + /** * Sends the request to the actual shard. * @param shardIt the shards iterator @@ -348,34 +342,6 @@ protected abstract void executePhaseOnShard( SearchActionListener listener ); - protected void fork(final Runnable runnable) { - executor.execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - logger.error(() -> "unexpected error during [" + task + "]", e); - assert false : e; - } - - @Override - public void onRejection(Exception e) { - // avoid leaks during node shutdown by executing on the current thread if the executor shuts down - assert e instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown() : e; - doRun(); - } - - @Override - protected void doRun() { - runnable.run(); - } - - @Override - public boolean isForceExecution() { - // we can not allow a stuffed queue to reject execution here - return true; - } - }); - } - @Override public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPhase) { /* This is the main search phase transition where we move to the next phase. If all shards @@ -794,61 +760,63 @@ protected final ShardSearchRequest buildShardSearchRequest(SearchShardIterator s */ protected abstract SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context); - private void executeNext(PendingExecutions pendingExecutions, Thread originalThread) { - executeNext(pendingExecutions == null ? 
null : pendingExecutions.finishAndGetNext(), originalThread); - } - - void executeNext(Runnable runnable, Thread originalThread) { - if (runnable != null) { - assert throttleConcurrentRequests; - if (originalThread == Thread.currentThread()) { - fork(runnable); - } else { - runnable.run(); - } - } - } - private static final class PendingExecutions { - private final int permits; - private int permitsTaken = 0; - private final ArrayDeque queue = new ArrayDeque<>(); + private final Semaphore semaphore; + private final LinkedTransferQueue> queue = new LinkedTransferQueue<>(); PendingExecutions(int permits) { assert permits > 0 : "not enough permits: " + permits; - this.permits = permits; + semaphore = new Semaphore(permits); } - Runnable finishAndGetNext() { - synchronized (this) { - permitsTaken--; - assert permitsTaken >= 0 : "illegal taken permits: " + permitsTaken; + void submit(Consumer task) { + if (semaphore.tryAcquire()) { + executeAndRelease(task); + } else { + queue.add(task); + if (semaphore.tryAcquire()) { + task = pollNextTaskOrReleasePermit(); + if (task != null) { + executeAndRelease(task); + } + } } - return tryQueue(null); + } - void tryRun(Runnable runnable) { - Runnable r = tryQueue(runnable); - if (r != null) { - r.run(); + private void executeAndRelease(Consumer task) { + while (task != null) { + final SubscribableListener onDone = new SubscribableListener<>(); + task.accept(() -> onDone.onResponse(null)); + if (onDone.isDone()) { + // keep going on the current thread, no need to fork + task = pollNextTaskOrReleasePermit(); + } else { + onDone.addListener(new ActionListener<>() { + @Override + public void onResponse(Void unused) { + final Consumer nextTask = pollNextTaskOrReleasePermit(); + if (nextTask != null) { + executeAndRelease(nextTask); + } + } + + @Override + public void onFailure(Exception e) { + assert false : e; + } + }); + return; + } } } - private synchronized Runnable tryQueue(Runnable runnable) { - Runnable toExecute = null; - if (permitsTaken < permits) { - permitsTaken++; - toExecute = runnable; - if (toExecute == null) { // only poll if we don't have anything to execute - toExecute = queue.poll(); - } - if (toExecute == null) { - permitsTaken--; - } - } else if (runnable != null) { - queue.add(runnable); + private Consumer pollNextTaskOrReleasePermit() { + var task = queue.poll(); + if (task == null) { + semaphore.release(); } - return toExecute; + return task; } } } From 13e67bdd0803914ac75ec13853828fec1b42d4a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?= Date: Fri, 25 Oct 2024 12:43:13 +0200 Subject: [PATCH 57/60] Refactoring of the KQL grammar. 
(#115632) --- x-pack/plugin/kql/src/main/antlr/KqlBase.g4 | 95 +- .../plugin/kql/src/main/antlr/KqlBase.tokens | 31 +- .../kql/src/main/antlr/KqlBaseLexer.tokens | 31 +- .../xpack/kql/parser/KqlBase.interp | 28 +- .../xpack/kql/parser/KqlBaseBaseListener.java | 56 +- .../xpack/kql/parser/KqlBaseBaseVisitor.java | 30 +- .../xpack/kql/parser/KqlBaseLexer.interp | 22 +- .../xpack/kql/parser/KqlBaseLexer.java | 255 ++--- .../xpack/kql/parser/KqlBaseListener.java | 84 +- .../xpack/kql/parser/KqlBaseParser.java | 1010 ++++++++++------- .../xpack/kql/parser/KqlBaseVisitor.java | 44 +- .../kql/src/test/resources/supported-queries | 9 + .../src/test/resources/unsupported-queries | 8 - 13 files changed, 900 insertions(+), 803 deletions(-) diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 index cffa2db9f959a..dbf7c1979796a 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 +++ b/x-pack/plugin/kql/src/main/antlr/KqlBase.g4 @@ -26,70 +26,68 @@ topLevelQuery ; query - : query (AND | OR) query #booleanQuery - | NOT subQuery=simpleQuery #notQuery - | simpleQuery #defaultQuery + : query operator=(AND | OR) query #booleanQuery + | NOT subQuery=simpleQuery #notQuery + | simpleQuery #defaultQuery ; simpleQuery : nestedQuery - | expression | parenthesizedQuery - ; - -expression - : fieldTermQuery - | fieldRangeQuery + | matchAllQuery + | existsQuery + | rangeQuery + | fieldQuery + | fieldLessQuery ; nestedQuery : fieldName COLON LEFT_CURLY_BRACKET query RIGHT_CURLY_BRACKET ; -parenthesizedQuery: - LEFT_PARENTHESIS query RIGHT_PARENTHESIS; - -fieldRangeQuery - : fieldName operator=OP_COMPARE rangeQueryValue +matchAllQuery + : (WILDCARD COLON)? WILDCARD ; -fieldTermQuery - : (fieldName COLON)? termQueryValue +parenthesizedQuery + : LEFT_PARENTHESIS query RIGHT_PARENTHESIS ; -fieldName - : wildcardExpression - | unquotedLiteralExpression - | quotedStringExpression +rangeQuery + : fieldName operator=(OP_LESS|OP_LESS_EQ|OP_MORE|OP_MORE_EQ) rangeQueryValue ; rangeQueryValue - : unquotedLiteralExpression - | quotedStringExpression - ; - -termQueryValue - : wildcardExpression - | quotedStringExpression - | termValue=unquotedLiteralExpression - | groupingTermExpression; + : (UNQUOTED_LITERAL|WILDCARD)+ + | QUOTED_STRING + ; -groupingTermExpression - : LEFT_PARENTHESIS unquotedLiteralExpression RIGHT_PARENTHESIS +existsQuery + :fieldName COLON WILDCARD ; -unquotedLiteralExpression - : UNQUOTED_LITERAL+ +fieldQuery + : fieldName COLON fieldQueryValue + | fieldName COLON LEFT_PARENTHESIS fieldQueryValue RIGHT_PARENTHESIS ; -quotedStringExpression - : QUOTED_STRING +fieldLessQuery + : fieldQueryValue + | LEFT_PARENTHESIS fieldQueryValue RIGHT_PARENTHESIS ; -wildcardExpression - : WILDCARD -; +fieldQueryValue + : (AND|OR)? (UNQUOTED_LITERAL | WILDCARD )+ + | (UNQUOTED_LITERAL | WILDCARD )+ (AND|OR)? 
+ | (NOT|AND|OR) + | QUOTED_STRING + ; +fieldName + : value=UNQUOTED_LITERAL+ + | value=QUOTED_STRING + | value=WILDCARD + ; DEFAULT_SKIP: WHITESPACE -> skip; @@ -98,31 +96,34 @@ OR: 'or'; NOT: 'not'; COLON: ':'; -OP_COMPARE: OP_LESS | OP_MORE | OP_LESS_EQ | OP_MORE_EQ; +OP_LESS: '<'; +OP_LESS_EQ: '<='; +OP_MORE: '>'; +OP_MORE_EQ: '>='; LEFT_PARENTHESIS: '('; RIGHT_PARENTHESIS: ')'; LEFT_CURLY_BRACKET: '{'; RIGHT_CURLY_BRACKET: '}'; -UNQUOTED_LITERAL: WILDCARD* UNQUOTED_LITERAL_CHAR+ WILDCARD*; +UNQUOTED_LITERAL: UNQUOTED_LITERAL_CHAR+; QUOTED_STRING: '"'QUOTED_CHAR*'"'; -WILDCARD: WILDCARD_CHAR+; +WILDCARD: WILDCARD_CHAR; fragment WILDCARD_CHAR: '*'; -fragment OP_LESS: '<'; -fragment OP_LESS_EQ: '<='; -fragment OP_MORE: '>'; -fragment OP_MORE_EQ: '>='; fragment UNQUOTED_LITERAL_CHAR + : WILDCARD_CHAR* UNQUOTED_LITERAL_BASE_CHAR WILDCARD_CHAR* + | WILDCARD_CHAR WILDCARD_CHAR+ + ; + +fragment UNQUOTED_LITERAL_BASE_CHAR : ESCAPED_WHITESPACE | ESCAPED_SPECIAL_CHAR | ESCAPE_UNICODE_SEQUENCE | '\\' (AND | OR | NOT) - | WILDCARD_CHAR UNQUOTED_LITERAL_CHAR | NON_SPECIAL_CHAR ; @@ -135,7 +136,7 @@ fragment QUOTED_CHAR fragment WHITESPACE: [ \t\n\r\u3000]; fragment ESCAPED_WHITESPACE: '\\r' | '\\t' | '\\n'; -fragment NON_SPECIAL_CHAR: ~[ \\():<>"*{}]; +fragment NON_SPECIAL_CHAR: ~[ \n\r\t\u3000\\():<>"*{}]; fragment ESCAPED_SPECIAL_CHAR: '\\'[ \\():<>"*{}]; fragment ESCAPED_QUOTE: '\\"'; diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens b/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens index 268ae0613b9f0..f26b6b9c3da55 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens +++ b/x-pack/plugin/kql/src/main/antlr/KqlBase.tokens @@ -3,19 +3,26 @@ AND=2 OR=3 NOT=4 COLON=5 -OP_COMPARE=6 -LEFT_PARENTHESIS=7 -RIGHT_PARENTHESIS=8 -LEFT_CURLY_BRACKET=9 -RIGHT_CURLY_BRACKET=10 -UNQUOTED_LITERAL=11 -QUOTED_STRING=12 -WILDCARD=13 +OP_LESS=6 +OP_LESS_EQ=7 +OP_MORE=8 +OP_MORE_EQ=9 +LEFT_PARENTHESIS=10 +RIGHT_PARENTHESIS=11 +LEFT_CURLY_BRACKET=12 +RIGHT_CURLY_BRACKET=13 +UNQUOTED_LITERAL=14 +QUOTED_STRING=15 +WILDCARD=16 'and'=2 'or'=3 'not'=4 ':'=5 -'('=7 -')'=8 -'{'=9 -'}'=10 +'<'=6 +'<='=7 +'>'=8 +'>='=9 +'('=10 +')'=11 +'{'=12 +'}'=13 diff --git a/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens b/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens index 268ae0613b9f0..f26b6b9c3da55 100644 --- a/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens +++ b/x-pack/plugin/kql/src/main/antlr/KqlBaseLexer.tokens @@ -3,19 +3,26 @@ AND=2 OR=3 NOT=4 COLON=5 -OP_COMPARE=6 -LEFT_PARENTHESIS=7 -RIGHT_PARENTHESIS=8 -LEFT_CURLY_BRACKET=9 -RIGHT_CURLY_BRACKET=10 -UNQUOTED_LITERAL=11 -QUOTED_STRING=12 -WILDCARD=13 +OP_LESS=6 +OP_LESS_EQ=7 +OP_MORE=8 +OP_MORE_EQ=9 +LEFT_PARENTHESIS=10 +RIGHT_PARENTHESIS=11 +LEFT_CURLY_BRACKET=12 +RIGHT_CURLY_BRACKET=13 +UNQUOTED_LITERAL=14 +QUOTED_STRING=15 +WILDCARD=16 'and'=2 'or'=3 'not'=4 ':'=5 -'('=7 -')'=8 -'{'=9 -'}'=10 +'<'=6 +'<='=7 +'>'=8 +'>='=9 +'('=10 +')'=11 +'{'=12 +'}'=13 diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp index 1954195b52363..111cac6d641b9 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBase.interp @@ -5,7 +5,10 @@ null 'or' 'not' ':' -null +'<' +'<=' +'>' +'>=' '(' ')' '{' @@ -21,7 +24,10 @@ AND OR NOT COLON -OP_COMPARE +OP_LESS +OP_LESS_EQ +OP_MORE +OP_MORE_EQ LEFT_PARENTHESIS 
RIGHT_PARENTHESIS LEFT_CURLY_BRACKET @@ -34,19 +40,17 @@ rule names: topLevelQuery query simpleQuery -expression nestedQuery +matchAllQuery parenthesizedQuery -fieldRangeQuery -fieldTermQuery -fieldName +rangeQuery rangeQueryValue -termQueryValue -groupingTermExpression -unquotedLiteralExpression -quotedStringExpression -wildcardExpression +existsQuery +fieldQuery +fieldLessQuery +fieldQueryValue +fieldName atn: -[4, 1, 13, 108, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 1, 0, 3, 0, 32, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 40, 8, 1, 1, 1, 1, 1, 1, 1, 5, 1, 45, 8, 1, 10, 1, 12, 1, 48, 9, 1, 1, 2, 1, 2, 1, 2, 3, 2, 53, 8, 2, 1, 3, 1, 3, 3, 3, 57, 8, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 3, 7, 76, 8, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 3, 8, 83, 8, 8, 1, 9, 1, 9, 3, 9, 87, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 93, 8, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 4, 12, 100, 8, 12, 11, 12, 12, 12, 101, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 0, 1, 2, 15, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 0, 1, 1, 0, 2, 3, 106, 0, 31, 1, 0, 0, 0, 2, 39, 1, 0, 0, 0, 4, 52, 1, 0, 0, 0, 6, 56, 1, 0, 0, 0, 8, 58, 1, 0, 0, 0, 10, 64, 1, 0, 0, 0, 12, 68, 1, 0, 0, 0, 14, 75, 1, 0, 0, 0, 16, 82, 1, 0, 0, 0, 18, 86, 1, 0, 0, 0, 20, 92, 1, 0, 0, 0, 22, 94, 1, 0, 0, 0, 24, 99, 1, 0, 0, 0, 26, 103, 1, 0, 0, 0, 28, 105, 1, 0, 0, 0, 30, 32, 3, 2, 1, 0, 31, 30, 1, 0, 0, 0, 31, 32, 1, 0, 0, 0, 32, 33, 1, 0, 0, 0, 33, 34, 5, 0, 0, 1, 34, 1, 1, 0, 0, 0, 35, 36, 6, 1, -1, 0, 36, 37, 5, 4, 0, 0, 37, 40, 3, 4, 2, 0, 38, 40, 3, 4, 2, 0, 39, 35, 1, 0, 0, 0, 39, 38, 1, 0, 0, 0, 40, 46, 1, 0, 0, 0, 41, 42, 10, 3, 0, 0, 42, 43, 7, 0, 0, 0, 43, 45, 3, 2, 1, 4, 44, 41, 1, 0, 0, 0, 45, 48, 1, 0, 0, 0, 46, 44, 1, 0, 0, 0, 46, 47, 1, 0, 0, 0, 47, 3, 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 49, 53, 3, 8, 4, 0, 50, 53, 3, 6, 3, 0, 51, 53, 3, 10, 5, 0, 52, 49, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 52, 51, 1, 0, 0, 0, 53, 5, 1, 0, 0, 0, 54, 57, 3, 14, 7, 0, 55, 57, 3, 12, 6, 0, 56, 54, 1, 0, 0, 0, 56, 55, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 59, 3, 16, 8, 0, 59, 60, 5, 5, 0, 0, 60, 61, 5, 9, 0, 0, 61, 62, 3, 2, 1, 0, 62, 63, 5, 10, 0, 0, 63, 9, 1, 0, 0, 0, 64, 65, 5, 7, 0, 0, 65, 66, 3, 2, 1, 0, 66, 67, 5, 8, 0, 0, 67, 11, 1, 0, 0, 0, 68, 69, 3, 16, 8, 0, 69, 70, 5, 6, 0, 0, 70, 71, 3, 18, 9, 0, 71, 13, 1, 0, 0, 0, 72, 73, 3, 16, 8, 0, 73, 74, 5, 5, 0, 0, 74, 76, 1, 0, 0, 0, 75, 72, 1, 0, 0, 0, 75, 76, 1, 0, 0, 0, 76, 77, 1, 0, 0, 0, 77, 78, 3, 20, 10, 0, 78, 15, 1, 0, 0, 0, 79, 83, 3, 28, 14, 0, 80, 83, 3, 24, 12, 0, 81, 83, 3, 26, 13, 0, 82, 79, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 81, 1, 0, 0, 0, 83, 17, 1, 0, 0, 0, 84, 87, 3, 24, 12, 0, 85, 87, 3, 26, 13, 0, 86, 84, 1, 0, 0, 0, 86, 85, 1, 0, 0, 0, 87, 19, 1, 0, 0, 0, 88, 93, 3, 28, 14, 0, 89, 93, 3, 26, 13, 0, 90, 93, 3, 24, 12, 0, 91, 93, 3, 22, 11, 0, 92, 88, 1, 0, 0, 0, 92, 89, 1, 0, 0, 0, 92, 90, 1, 0, 0, 0, 92, 91, 1, 0, 0, 0, 93, 21, 1, 0, 0, 0, 94, 95, 5, 7, 0, 0, 95, 96, 3, 24, 12, 0, 96, 97, 5, 8, 0, 0, 97, 23, 1, 0, 0, 0, 98, 100, 5, 11, 0, 0, 99, 98, 1, 0, 0, 0, 100, 101, 1, 0, 0, 0, 101, 99, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 25, 1, 0, 0, 0, 103, 104, 5, 12, 0, 0, 104, 27, 1, 0, 0, 0, 105, 106, 5, 13, 0, 0, 106, 29, 1, 0, 0, 0, 10, 31, 39, 46, 52, 56, 75, 82, 86, 92, 101] \ No newline at end of file +[4, 1, 16, 135, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 
2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 3, 0, 28, 8, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 36, 8, 1, 1, 1, 1, 1, 1, 1, 5, 1, 41, 8, 1, 10, 1, 12, 1, 44, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 53, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 3, 4, 63, 8, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 6, 1, 7, 4, 7, 76, 8, 7, 11, 7, 12, 7, 77, 1, 7, 3, 7, 81, 8, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 1, 9, 3, 9, 97, 8, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 3, 10, 104, 8, 10, 1, 11, 3, 11, 107, 8, 11, 1, 11, 4, 11, 110, 8, 11, 11, 11, 12, 11, 111, 1, 11, 4, 11, 115, 8, 11, 11, 11, 12, 11, 116, 1, 11, 3, 11, 120, 8, 11, 1, 11, 1, 11, 3, 11, 124, 8, 11, 1, 12, 4, 12, 127, 8, 12, 11, 12, 12, 12, 128, 1, 12, 1, 12, 3, 12, 133, 8, 12, 1, 12, 0, 1, 2, 13, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 0, 4, 1, 0, 2, 3, 1, 0, 6, 9, 2, 0, 14, 14, 16, 16, 1, 0, 2, 4, 145, 0, 27, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 52, 1, 0, 0, 0, 6, 54, 1, 0, 0, 0, 8, 62, 1, 0, 0, 0, 10, 66, 1, 0, 0, 0, 12, 70, 1, 0, 0, 0, 14, 80, 1, 0, 0, 0, 16, 82, 1, 0, 0, 0, 18, 96, 1, 0, 0, 0, 20, 103, 1, 0, 0, 0, 22, 123, 1, 0, 0, 0, 24, 132, 1, 0, 0, 0, 26, 28, 3, 2, 1, 0, 27, 26, 1, 0, 0, 0, 27, 28, 1, 0, 0, 0, 28, 29, 1, 0, 0, 0, 29, 30, 5, 0, 0, 1, 30, 1, 1, 0, 0, 0, 31, 32, 6, 1, -1, 0, 32, 33, 5, 4, 0, 0, 33, 36, 3, 4, 2, 0, 34, 36, 3, 4, 2, 0, 35, 31, 1, 0, 0, 0, 35, 34, 1, 0, 0, 0, 36, 42, 1, 0, 0, 0, 37, 38, 10, 3, 0, 0, 38, 39, 7, 0, 0, 0, 39, 41, 3, 2, 1, 3, 40, 37, 1, 0, 0, 0, 41, 44, 1, 0, 0, 0, 42, 40, 1, 0, 0, 0, 42, 43, 1, 0, 0, 0, 43, 3, 1, 0, 0, 0, 44, 42, 1, 0, 0, 0, 45, 53, 3, 6, 3, 0, 46, 53, 3, 10, 5, 0, 47, 53, 3, 8, 4, 0, 48, 53, 3, 16, 8, 0, 49, 53, 3, 12, 6, 0, 50, 53, 3, 18, 9, 0, 51, 53, 3, 20, 10, 0, 52, 45, 1, 0, 0, 0, 52, 46, 1, 0, 0, 0, 52, 47, 1, 0, 0, 0, 52, 48, 1, 0, 0, 0, 52, 49, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 52, 51, 1, 0, 0, 0, 53, 5, 1, 0, 0, 0, 54, 55, 3, 24, 12, 0, 55, 56, 5, 5, 0, 0, 56, 57, 5, 12, 0, 0, 57, 58, 3, 2, 1, 0, 58, 59, 5, 13, 0, 0, 59, 7, 1, 0, 0, 0, 60, 61, 5, 16, 0, 0, 61, 63, 5, 5, 0, 0, 62, 60, 1, 0, 0, 0, 62, 63, 1, 0, 0, 0, 63, 64, 1, 0, 0, 0, 64, 65, 5, 16, 0, 0, 65, 9, 1, 0, 0, 0, 66, 67, 5, 10, 0, 0, 67, 68, 3, 2, 1, 0, 68, 69, 5, 11, 0, 0, 69, 11, 1, 0, 0, 0, 70, 71, 3, 24, 12, 0, 71, 72, 7, 1, 0, 0, 72, 73, 3, 14, 7, 0, 73, 13, 1, 0, 0, 0, 74, 76, 7, 2, 0, 0, 75, 74, 1, 0, 0, 0, 76, 77, 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 77, 78, 1, 0, 0, 0, 78, 81, 1, 0, 0, 0, 79, 81, 5, 15, 0, 0, 80, 75, 1, 0, 0, 0, 80, 79, 1, 0, 0, 0, 81, 15, 1, 0, 0, 0, 82, 83, 3, 24, 12, 0, 83, 84, 5, 5, 0, 0, 84, 85, 5, 16, 0, 0, 85, 17, 1, 0, 0, 0, 86, 87, 3, 24, 12, 0, 87, 88, 5, 5, 0, 0, 88, 89, 3, 22, 11, 0, 89, 97, 1, 0, 0, 0, 90, 91, 3, 24, 12, 0, 91, 92, 5, 5, 0, 0, 92, 93, 5, 10, 0, 0, 93, 94, 3, 22, 11, 0, 94, 95, 5, 11, 0, 0, 95, 97, 1, 0, 0, 0, 96, 86, 1, 0, 0, 0, 96, 90, 1, 0, 0, 0, 97, 19, 1, 0, 0, 0, 98, 104, 3, 22, 11, 0, 99, 100, 5, 10, 0, 0, 100, 101, 3, 22, 11, 0, 101, 102, 5, 11, 0, 0, 102, 104, 1, 0, 0, 0, 103, 98, 1, 0, 0, 0, 103, 99, 1, 0, 0, 0, 104, 21, 1, 0, 0, 0, 105, 107, 7, 0, 0, 0, 106, 105, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 109, 1, 0, 0, 0, 108, 110, 7, 2, 0, 0, 109, 108, 1, 0, 0, 0, 110, 111, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 111, 112, 1, 0, 0, 0, 112, 124, 1, 0, 0, 0, 113, 115, 7, 2, 0, 0, 114, 113, 1, 0, 0, 0, 115, 116, 1, 0, 0, 0, 116, 114, 1, 0, 0, 0, 116, 117, 1, 0, 0, 0, 117, 119, 1, 0, 0, 0, 
118, 120, 7, 0, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 124, 1, 0, 0, 0, 121, 124, 7, 3, 0, 0, 122, 124, 5, 15, 0, 0, 123, 106, 1, 0, 0, 0, 123, 114, 1, 0, 0, 0, 123, 121, 1, 0, 0, 0, 123, 122, 1, 0, 0, 0, 124, 23, 1, 0, 0, 0, 125, 127, 5, 14, 0, 0, 126, 125, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 128, 129, 1, 0, 0, 0, 129, 133, 1, 0, 0, 0, 130, 133, 5, 15, 0, 0, 131, 133, 5, 16, 0, 0, 132, 126, 1, 0, 0, 0, 132, 130, 1, 0, 0, 0, 132, 131, 1, 0, 0, 0, 133, 25, 1, 0, 0, 0, 16, 27, 35, 42, 52, 62, 77, 80, 96, 103, 106, 111, 116, 119, 123, 128, 132] \ No newline at end of file diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java index 1b4282b5dbbea..426af7f7115b9 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseListener.java @@ -80,18 +80,6 @@ class KqlBaseBaseListener implements KqlBaseListener { *
<p>The default implementation does nothing.</p>
*/ @Override public void exitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { } - /** - * {@inheritDoc} - * - *
<p>The default implementation does nothing.</p>
- */ - @Override public void enterExpression(KqlBaseParser.ExpressionContext ctx) { } - /** - * {@inheritDoc} - * - *
<p>The default implementation does nothing.</p>
- */ - @Override public void exitExpression(KqlBaseParser.ExpressionContext ctx) { } /** * {@inheritDoc} * @@ -109,49 +97,37 @@ class KqlBaseBaseListener implements KqlBaseListener { * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } - /** - * {@inheritDoc} - * - *
<p>The default implementation does nothing.</p>
- */ - @Override public void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } - /** - * {@inheritDoc} - * - *
<p>The default implementation does nothing.</p>
- */ - @Override public void enterFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx) { } + @Override public void enterMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx) { } + @Override public void exitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx) { } + @Override public void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx) { } + @Override public void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterFieldName(KqlBaseParser.FieldNameContext ctx) { } + @Override public void enterRangeQuery(KqlBaseParser.RangeQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitFieldName(KqlBaseParser.FieldNameContext ctx) { } + @Override public void exitRangeQuery(KqlBaseParser.RangeQueryContext ctx) { } /** * {@inheritDoc} * @@ -169,61 +145,61 @@ class KqlBaseBaseListener implements KqlBaseListener { * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterTermQueryValue(KqlBaseParser.TermQueryValueContext ctx) { } + @Override public void enterExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx) { } + @Override public void exitExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx) { } + @Override public void enterFieldQuery(KqlBaseParser.FieldQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx) { } + @Override public void exitFieldQuery(KqlBaseParser.FieldQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx) { } + @Override public void enterFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx) { } + @Override public void exitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx) { } + @Override public void enterFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx) { } + @Override public void exitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void enterWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx) { } + @Override public void enterFieldName(KqlBaseParser.FieldNameContext ctx) { } /** * {@inheritDoc} * *
<p>The default implementation does nothing.</p>
*/ - @Override public void exitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx) { } + @Override public void exitFieldName(KqlBaseParser.FieldNameContext ctx) { } /** * {@inheritDoc} diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java index 09cd668804154..cf1f2b3972823 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseBaseVisitor.java @@ -55,13 +55,6 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa * {@link #visitChildren} on {@code ctx}.
</p>
*/ @Override public T visitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx) { return visitChildren(ctx); } - /** - * {@inheritDoc} - * - *
<p>The default implementation returns the result of calling - * {@link #visitChildren} on {@code ctx}.</p>
- */ - @Override public T visitExpression(KqlBaseParser.ExpressionContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * @@ -75,28 +68,21 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { return visitChildren(ctx); } - /** - * {@inheritDoc} - * - *
<p>The default implementation returns the result of calling - * {@link #visitChildren} on {@code ctx}.</p>
- */ - @Override public T visitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx) { return visitChildren(ctx); } + @Override public T visitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx) { return visitChildren(ctx); } + @Override public T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitFieldName(KqlBaseParser.FieldNameContext ctx) { return visitChildren(ctx); } + @Override public T visitRangeQuery(KqlBaseParser.RangeQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * @@ -110,33 +96,33 @@ class KqlBaseBaseVisitor extends AbstractParseTreeVisitor implements KqlBa *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx) { return visitChildren(ctx); } + @Override public T visitExistsQuery(KqlBaseParser.ExistsQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldQuery(KqlBaseParser.FieldQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx) { return visitChildren(ctx); } /** * {@inheritDoc} * *
<p>The default implementation returns the result of calling * {@link #visitChildren} on {@code ctx}.</p>
*/ - @Override public T visitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx) { return visitChildren(ctx); } + @Override public T visitFieldName(KqlBaseParser.FieldNameContext ctx) { return visitChildren(ctx); } } diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp index d178df5fcbc88..f9afe07af3b40 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.interp @@ -5,7 +5,10 @@ null 'or' 'not' ':' -null +'<' +'<=' +'>' +'>=' '(' ')' '{' @@ -21,7 +24,10 @@ AND OR NOT COLON -OP_COMPARE +OP_LESS +OP_LESS_EQ +OP_MORE +OP_MORE_EQ LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_CURLY_BRACKET @@ -36,7 +42,10 @@ AND OR NOT COLON -OP_COMPARE +OP_LESS +OP_LESS_EQ +OP_MORE +OP_MORE_EQ LEFT_PARENTHESIS RIGHT_PARENTHESIS LEFT_CURLY_BRACKET @@ -45,11 +54,8 @@ UNQUOTED_LITERAL QUOTED_STRING WILDCARD WILDCARD_CHAR -OP_LESS -OP_LESS_EQ -OP_MORE -OP_MORE_EQ UNQUOTED_LITERAL_CHAR +UNQUOTED_LITERAL_BASE_CHAR QUOTED_CHAR WHITESPACE ESCAPED_WHITESPACE @@ -68,4 +74,4 @@ mode names: DEFAULT_MODE atn: -[4, 0, 13, 181, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 3, 5, 79, 8, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 5, 10, 90, 8, 10, 10, 10, 12, 10, 93, 9, 10, 1, 10, 4, 10, 96, 8, 10, 11, 10, 12, 10, 97, 1, 10, 5, 10, 101, 8, 10, 10, 10, 12, 10, 104, 9, 10, 1, 11, 1, 11, 5, 11, 108, 8, 11, 10, 11, 12, 11, 111, 9, 11, 1, 11, 1, 11, 1, 12, 4, 12, 116, 8, 12, 11, 12, 12, 12, 117, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 139, 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 145, 8, 18, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 151, 8, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 161, 8, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 0, 0, 28, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 0, 29, 0, 31, 0, 33, 0, 35, 0, 37, 0, 39, 0, 41, 0, 43, 0, 45, 0, 47, 0, 49, 0, 51, 0, 53, 0, 55, 0, 1, 0, 11, 2, 0, 65, 65, 97, 97, 2, 0, 78, 78, 110, 110, 2, 0, 68, 68, 100, 100, 2, 0, 79, 79, 111, 111, 2, 0, 82, 82, 114, 114, 2, 0, 84, 84, 116, 116, 1, 0, 34, 34, 4, 0, 9, 10, 13, 13, 32, 32, 12288, 12288, 9, 0, 32, 32, 34, 34, 40, 42, 58, 58, 60, 60, 62, 62, 92, 92, 123, 123, 125, 125, 2, 0, 85, 85, 117, 117, 3, 0, 48, 57, 65, 70, 97, 102, 185, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 1, 57, 1, 0, 0, 0, 3, 61, 1, 0, 0, 0, 5, 65, 1, 0, 0, 0, 7, 68, 1, 0, 0, 0, 9, 72, 1, 0, 0, 0, 11, 78, 1, 0, 0, 0, 13, 80, 1, 0, 0, 0, 15, 82, 1, 0, 0, 0, 17, 84, 1, 0, 0, 0, 
19, 86, 1, 0, 0, 0, 21, 91, 1, 0, 0, 0, 23, 105, 1, 0, 0, 0, 25, 115, 1, 0, 0, 0, 27, 119, 1, 0, 0, 0, 29, 121, 1, 0, 0, 0, 31, 123, 1, 0, 0, 0, 33, 126, 1, 0, 0, 0, 35, 128, 1, 0, 0, 0, 37, 144, 1, 0, 0, 0, 39, 150, 1, 0, 0, 0, 41, 152, 1, 0, 0, 0, 43, 160, 1, 0, 0, 0, 45, 162, 1, 0, 0, 0, 47, 164, 1, 0, 0, 0, 49, 167, 1, 0, 0, 0, 51, 170, 1, 0, 0, 0, 53, 173, 1, 0, 0, 0, 55, 179, 1, 0, 0, 0, 57, 58, 3, 41, 20, 0, 58, 59, 1, 0, 0, 0, 59, 60, 6, 0, 0, 0, 60, 2, 1, 0, 0, 0, 61, 62, 7, 0, 0, 0, 62, 63, 7, 1, 0, 0, 63, 64, 7, 2, 0, 0, 64, 4, 1, 0, 0, 0, 65, 66, 7, 3, 0, 0, 66, 67, 7, 4, 0, 0, 67, 6, 1, 0, 0, 0, 68, 69, 7, 1, 0, 0, 69, 70, 7, 3, 0, 0, 70, 71, 7, 5, 0, 0, 71, 8, 1, 0, 0, 0, 72, 73, 5, 58, 0, 0, 73, 10, 1, 0, 0, 0, 74, 79, 3, 29, 14, 0, 75, 79, 3, 33, 16, 0, 76, 79, 3, 31, 15, 0, 77, 79, 3, 35, 17, 0, 78, 74, 1, 0, 0, 0, 78, 75, 1, 0, 0, 0, 78, 76, 1, 0, 0, 0, 78, 77, 1, 0, 0, 0, 79, 12, 1, 0, 0, 0, 80, 81, 5, 40, 0, 0, 81, 14, 1, 0, 0, 0, 82, 83, 5, 41, 0, 0, 83, 16, 1, 0, 0, 0, 84, 85, 5, 123, 0, 0, 85, 18, 1, 0, 0, 0, 86, 87, 5, 125, 0, 0, 87, 20, 1, 0, 0, 0, 88, 90, 3, 25, 12, 0, 89, 88, 1, 0, 0, 0, 90, 93, 1, 0, 0, 0, 91, 89, 1, 0, 0, 0, 91, 92, 1, 0, 0, 0, 92, 95, 1, 0, 0, 0, 93, 91, 1, 0, 0, 0, 94, 96, 3, 37, 18, 0, 95, 94, 1, 0, 0, 0, 96, 97, 1, 0, 0, 0, 97, 95, 1, 0, 0, 0, 97, 98, 1, 0, 0, 0, 98, 102, 1, 0, 0, 0, 99, 101, 3, 25, 12, 0, 100, 99, 1, 0, 0, 0, 101, 104, 1, 0, 0, 0, 102, 100, 1, 0, 0, 0, 102, 103, 1, 0, 0, 0, 103, 22, 1, 0, 0, 0, 104, 102, 1, 0, 0, 0, 105, 109, 5, 34, 0, 0, 106, 108, 3, 39, 19, 0, 107, 106, 1, 0, 0, 0, 108, 111, 1, 0, 0, 0, 109, 107, 1, 0, 0, 0, 109, 110, 1, 0, 0, 0, 110, 112, 1, 0, 0, 0, 111, 109, 1, 0, 0, 0, 112, 113, 5, 34, 0, 0, 113, 24, 1, 0, 0, 0, 114, 116, 3, 27, 13, 0, 115, 114, 1, 0, 0, 0, 116, 117, 1, 0, 0, 0, 117, 115, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 26, 1, 0, 0, 0, 119, 120, 5, 42, 0, 0, 120, 28, 1, 0, 0, 0, 121, 122, 5, 60, 0, 0, 122, 30, 1, 0, 0, 0, 123, 124, 5, 60, 0, 0, 124, 125, 5, 61, 0, 0, 125, 32, 1, 0, 0, 0, 126, 127, 5, 62, 0, 0, 127, 34, 1, 0, 0, 0, 128, 129, 5, 62, 0, 0, 129, 130, 5, 61, 0, 0, 130, 36, 1, 0, 0, 0, 131, 145, 3, 43, 21, 0, 132, 145, 3, 47, 23, 0, 133, 145, 3, 51, 25, 0, 134, 138, 5, 92, 0, 0, 135, 139, 3, 3, 1, 0, 136, 139, 3, 5, 2, 0, 137, 139, 3, 7, 3, 0, 138, 135, 1, 0, 0, 0, 138, 136, 1, 0, 0, 0, 138, 137, 1, 0, 0, 0, 139, 145, 1, 0, 0, 0, 140, 141, 3, 27, 13, 0, 141, 142, 3, 37, 18, 0, 142, 145, 1, 0, 0, 0, 143, 145, 3, 45, 22, 0, 144, 131, 1, 0, 0, 0, 144, 132, 1, 0, 0, 0, 144, 133, 1, 0, 0, 0, 144, 134, 1, 0, 0, 0, 144, 140, 1, 0, 0, 0, 144, 143, 1, 0, 0, 0, 145, 38, 1, 0, 0, 0, 146, 151, 3, 43, 21, 0, 147, 151, 3, 51, 25, 0, 148, 151, 3, 49, 24, 0, 149, 151, 8, 6, 0, 0, 150, 146, 1, 0, 0, 0, 150, 147, 1, 0, 0, 0, 150, 148, 1, 0, 0, 0, 150, 149, 1, 0, 0, 0, 151, 40, 1, 0, 0, 0, 152, 153, 7, 7, 0, 0, 153, 42, 1, 0, 0, 0, 154, 155, 5, 92, 0, 0, 155, 161, 7, 4, 0, 0, 156, 157, 5, 92, 0, 0, 157, 161, 7, 5, 0, 0, 158, 159, 5, 92, 0, 0, 159, 161, 7, 1, 0, 0, 160, 154, 1, 0, 0, 0, 160, 156, 1, 0, 0, 0, 160, 158, 1, 0, 0, 0, 161, 44, 1, 0, 0, 0, 162, 163, 8, 8, 0, 0, 163, 46, 1, 0, 0, 0, 164, 165, 5, 92, 0, 0, 165, 166, 7, 8, 0, 0, 166, 48, 1, 0, 0, 0, 167, 168, 5, 92, 0, 0, 168, 169, 5, 34, 0, 0, 169, 50, 1, 0, 0, 0, 170, 171, 5, 92, 0, 0, 171, 172, 3, 53, 26, 0, 172, 52, 1, 0, 0, 0, 173, 174, 7, 9, 0, 0, 174, 175, 3, 55, 27, 0, 175, 176, 3, 55, 27, 0, 176, 177, 3, 55, 27, 0, 177, 178, 3, 55, 27, 0, 178, 54, 1, 0, 0, 0, 179, 180, 7, 10, 0, 0, 180, 56, 1, 0, 0, 0, 11, 0, 78, 91, 97, 102, 109, 117, 
138, 144, 150, 160, 1, 6, 0, 0] \ No newline at end of file +[4, 0, 16, 178, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 4, 13, 94, 8, 13, 11, 13, 12, 13, 95, 1, 14, 1, 14, 5, 14, 100, 8, 14, 10, 14, 12, 14, 103, 9, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 5, 17, 112, 8, 17, 10, 17, 12, 17, 115, 9, 17, 1, 17, 1, 17, 5, 17, 119, 8, 17, 10, 17, 12, 17, 122, 9, 17, 1, 17, 1, 17, 4, 17, 126, 8, 17, 11, 17, 12, 17, 127, 3, 17, 130, 8, 17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 3, 18, 139, 8, 18, 1, 18, 3, 18, 142, 8, 18, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 148, 8, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 158, 8, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 0, 0, 28, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 0, 35, 0, 37, 0, 39, 0, 41, 0, 43, 0, 45, 0, 47, 0, 49, 0, 51, 0, 53, 0, 55, 0, 1, 0, 12, 2, 0, 65, 65, 97, 97, 2, 0, 78, 78, 110, 110, 2, 0, 68, 68, 100, 100, 2, 0, 79, 79, 111, 111, 2, 0, 82, 82, 114, 114, 2, 0, 84, 84, 116, 116, 1, 0, 34, 34, 4, 0, 9, 10, 13, 13, 32, 32, 12288, 12288, 12, 0, 9, 10, 13, 13, 32, 32, 34, 34, 40, 42, 58, 58, 60, 60, 62, 62, 92, 92, 123, 123, 125, 125, 12288, 12288, 9, 0, 32, 32, 34, 34, 40, 42, 58, 58, 60, 60, 62, 62, 92, 92, 123, 123, 125, 125, 2, 0, 85, 85, 117, 117, 3, 0, 48, 57, 65, 70, 97, 102, 182, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 1, 57, 1, 0, 0, 0, 3, 61, 1, 0, 0, 0, 5, 65, 1, 0, 0, 0, 7, 68, 1, 0, 0, 0, 9, 72, 1, 0, 0, 0, 11, 74, 1, 0, 0, 0, 13, 76, 1, 0, 0, 0, 15, 79, 1, 0, 0, 0, 17, 81, 1, 0, 0, 0, 19, 84, 1, 0, 0, 0, 21, 86, 1, 0, 0, 0, 23, 88, 1, 0, 0, 0, 25, 90, 1, 0, 0, 0, 27, 93, 1, 0, 0, 0, 29, 97, 1, 0, 0, 0, 31, 106, 1, 0, 0, 0, 33, 108, 1, 0, 0, 0, 35, 129, 1, 0, 0, 0, 37, 141, 1, 0, 0, 0, 39, 147, 1, 0, 0, 0, 41, 149, 1, 0, 0, 0, 43, 157, 1, 0, 0, 0, 45, 159, 1, 0, 0, 0, 47, 161, 1, 0, 0, 0, 49, 164, 1, 0, 0, 0, 51, 167, 1, 0, 0, 0, 53, 170, 1, 0, 0, 0, 55, 176, 1, 0, 0, 0, 57, 58, 3, 41, 20, 0, 58, 59, 1, 0, 0, 0, 59, 60, 6, 0, 0, 0, 60, 2, 1, 0, 0, 0, 61, 62, 7, 0, 0, 0, 62, 63, 7, 1, 0, 0, 63, 64, 7, 2, 0, 0, 64, 4, 1, 0, 0, 0, 65, 66, 7, 3, 0, 0, 66, 67, 7, 4, 0, 0, 67, 6, 1, 0, 0, 0, 68, 69, 7, 1, 0, 0, 69, 70, 7, 3, 0, 0, 70, 71, 7, 5, 0, 0, 71, 8, 1, 0, 0, 0, 72, 73, 5, 58, 0, 0, 73, 10, 1, 0, 0, 0, 74, 75, 5, 60, 0, 0, 75, 12, 1, 0, 0, 0, 76, 77, 5, 60, 0, 0, 77, 78, 5, 61, 0, 0, 78, 14, 1, 0, 0, 0, 79, 80, 5, 62, 0, 0, 80, 16, 1, 0, 0, 0, 81, 82, 5, 62, 0, 0, 82, 83, 5, 61, 0, 0, 83, 18, 1, 0, 0, 0, 84, 85, 5, 40, 0, 0, 85, 20, 1, 0, 0, 0, 86, 87, 5, 41, 0, 0, 87, 22, 1, 0, 0, 0, 88, 89, 5, 123, 0, 0, 89, 24, 1, 
0, 0, 0, 90, 91, 5, 125, 0, 0, 91, 26, 1, 0, 0, 0, 92, 94, 3, 35, 17, 0, 93, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 93, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 28, 1, 0, 0, 0, 97, 101, 5, 34, 0, 0, 98, 100, 3, 39, 19, 0, 99, 98, 1, 0, 0, 0, 100, 103, 1, 0, 0, 0, 101, 99, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 104, 1, 0, 0, 0, 103, 101, 1, 0, 0, 0, 104, 105, 5, 34, 0, 0, 105, 30, 1, 0, 0, 0, 106, 107, 3, 33, 16, 0, 107, 32, 1, 0, 0, 0, 108, 109, 5, 42, 0, 0, 109, 34, 1, 0, 0, 0, 110, 112, 3, 33, 16, 0, 111, 110, 1, 0, 0, 0, 112, 115, 1, 0, 0, 0, 113, 111, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 116, 1, 0, 0, 0, 115, 113, 1, 0, 0, 0, 116, 120, 3, 37, 18, 0, 117, 119, 3, 33, 16, 0, 118, 117, 1, 0, 0, 0, 119, 122, 1, 0, 0, 0, 120, 118, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 130, 1, 0, 0, 0, 122, 120, 1, 0, 0, 0, 123, 125, 3, 33, 16, 0, 124, 126, 3, 33, 16, 0, 125, 124, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 125, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 130, 1, 0, 0, 0, 129, 113, 1, 0, 0, 0, 129, 123, 1, 0, 0, 0, 130, 36, 1, 0, 0, 0, 131, 142, 3, 43, 21, 0, 132, 142, 3, 47, 23, 0, 133, 142, 3, 51, 25, 0, 134, 138, 5, 92, 0, 0, 135, 139, 3, 3, 1, 0, 136, 139, 3, 5, 2, 0, 137, 139, 3, 7, 3, 0, 138, 135, 1, 0, 0, 0, 138, 136, 1, 0, 0, 0, 138, 137, 1, 0, 0, 0, 139, 142, 1, 0, 0, 0, 140, 142, 3, 45, 22, 0, 141, 131, 1, 0, 0, 0, 141, 132, 1, 0, 0, 0, 141, 133, 1, 0, 0, 0, 141, 134, 1, 0, 0, 0, 141, 140, 1, 0, 0, 0, 142, 38, 1, 0, 0, 0, 143, 148, 3, 43, 21, 0, 144, 148, 3, 51, 25, 0, 145, 148, 3, 49, 24, 0, 146, 148, 8, 6, 0, 0, 147, 143, 1, 0, 0, 0, 147, 144, 1, 0, 0, 0, 147, 145, 1, 0, 0, 0, 147, 146, 1, 0, 0, 0, 148, 40, 1, 0, 0, 0, 149, 150, 7, 7, 0, 0, 150, 42, 1, 0, 0, 0, 151, 152, 5, 92, 0, 0, 152, 158, 7, 4, 0, 0, 153, 154, 5, 92, 0, 0, 154, 158, 7, 5, 0, 0, 155, 156, 5, 92, 0, 0, 156, 158, 7, 1, 0, 0, 157, 151, 1, 0, 0, 0, 157, 153, 1, 0, 0, 0, 157, 155, 1, 0, 0, 0, 158, 44, 1, 0, 0, 0, 159, 160, 8, 8, 0, 0, 160, 46, 1, 0, 0, 0, 161, 162, 5, 92, 0, 0, 162, 163, 7, 9, 0, 0, 163, 48, 1, 0, 0, 0, 164, 165, 5, 92, 0, 0, 165, 166, 5, 34, 0, 0, 166, 50, 1, 0, 0, 0, 167, 168, 5, 92, 0, 0, 168, 169, 3, 53, 26, 0, 169, 52, 1, 0, 0, 0, 170, 171, 7, 10, 0, 0, 171, 172, 3, 55, 27, 0, 172, 173, 3, 55, 27, 0, 173, 174, 3, 55, 27, 0, 174, 175, 3, 55, 27, 0, 175, 54, 1, 0, 0, 0, 176, 177, 7, 11, 0, 0, 177, 56, 1, 0, 0, 0, 11, 0, 95, 101, 113, 120, 127, 129, 138, 141, 147, 157, 1, 6, 0, 0] \ No newline at end of file diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java index b397a412d5e8e..f9353afd6e114 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseLexer.java @@ -25,9 +25,9 @@ class KqlBaseLexer extends Lexer { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_COMPARE=6, LEFT_PARENTHESIS=7, - RIGHT_PARENTHESIS=8, LEFT_CURLY_BRACKET=9, RIGHT_CURLY_BRACKET=10, UNQUOTED_LITERAL=11, - QUOTED_STRING=12, WILDCARD=13; + DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_LESS=6, OP_LESS_EQ=7, + OP_MORE=8, OP_MORE_EQ=9, LEFT_PARENTHESIS=10, RIGHT_PARENTHESIS=11, LEFT_CURLY_BRACKET=12, + RIGHT_CURLY_BRACKET=13, UNQUOTED_LITERAL=14, QUOTED_STRING=15, WILDCARD=16; public static String[] channelNames = { "DEFAULT_TOKEN_CHANNEL", "HIDDEN" }; @@ -38,28 
+38,29 @@ class KqlBaseLexer extends Lexer { private static String[] makeRuleNames() { return new String[] { - "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_COMPARE", "LEFT_PARENTHESIS", - "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", - "QUOTED_STRING", "WILDCARD", "WILDCARD_CHAR", "OP_LESS", "OP_LESS_EQ", - "OP_MORE", "OP_MORE_EQ", "UNQUOTED_LITERAL_CHAR", "QUOTED_CHAR", "WHITESPACE", - "ESCAPED_WHITESPACE", "NON_SPECIAL_CHAR", "ESCAPED_SPECIAL_CHAR", "ESCAPED_QUOTE", - "ESCAPE_UNICODE_SEQUENCE", "UNICODE_SEQUENCE", "HEX_DIGIT" + "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_LESS", "OP_LESS_EQ", + "OP_MORE", "OP_MORE_EQ", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", + "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", "QUOTED_STRING", "WILDCARD", + "WILDCARD_CHAR", "UNQUOTED_LITERAL_CHAR", "UNQUOTED_LITERAL_BASE_CHAR", + "QUOTED_CHAR", "WHITESPACE", "ESCAPED_WHITESPACE", "NON_SPECIAL_CHAR", + "ESCAPED_SPECIAL_CHAR", "ESCAPED_QUOTE", "ESCAPE_UNICODE_SEQUENCE", "UNICODE_SEQUENCE", + "HEX_DIGIT" }; } public static final String[] ruleNames = makeRuleNames(); private static String[] makeLiteralNames() { return new String[] { - null, null, "'and'", "'or'", "'not'", "':'", null, "'('", "')'", "'{'", - "'}'" + null, null, "'and'", "'or'", "'not'", "':'", "'<'", "'<='", "'>'", "'>='", + "'('", "')'", "'{'", "'}'" }; } private static final String[] _LITERAL_NAMES = makeLiteralNames(); private static String[] makeSymbolicNames() { return new String[] { - null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_COMPARE", "LEFT_PARENTHESIS", - "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", - "QUOTED_STRING", "WILDCARD" + null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_LESS", "OP_LESS_EQ", + "OP_MORE", "OP_MORE_EQ", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", + "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", "QUOTED_STRING", "WILDCARD" }; } private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames(); @@ -121,119 +122,119 @@ public KqlBaseLexer(CharStream input) { public ATN getATN() { return _ATN; } public static final String _serializedATN = - "\u0004\u0000\r\u00b5\u0006\uffff\uffff\u0002\u0000\u0007\u0000\u0002\u0001"+ - "\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004"+ - "\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007"+ - "\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b"+ - "\u0007\u000b\u0002\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0002"+ - "\u000f\u0007\u000f\u0002\u0010\u0007\u0010\u0002\u0011\u0007\u0011\u0002"+ - "\u0012\u0007\u0012\u0002\u0013\u0007\u0013\u0002\u0014\u0007\u0014\u0002"+ - "\u0015\u0007\u0015\u0002\u0016\u0007\u0016\u0002\u0017\u0007\u0017\u0002"+ - "\u0018\u0007\u0018\u0002\u0019\u0007\u0019\u0002\u001a\u0007\u001a\u0002"+ - "\u001b\u0007\u001b\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000\u0001"+ - "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002\u0001"+ - "\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004\u0001"+ - "\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0003\u0005O\b"+ - "\u0005\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001\b\u0001\b"+ - "\u0001\t\u0001\t\u0001\n\u0005\nZ\b\n\n\n\f\n]\t\n\u0001\n\u0004\n`\b"+ - "\n\u000b\n\f\na\u0001\n\u0005\ne\b\n\n\n\f\nh\t\n\u0001\u000b\u0001\u000b"+ - "\u0005\u000bl\b\u000b\n\u000b\f\u000bo\t\u000b\u0001\u000b\u0001\u000b"+ - 
"\u0001\f\u0004\ft\b\f\u000b\f\f\fu\u0001\r\u0001\r\u0001\u000e\u0001\u000e"+ - "\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0011"+ - "\u0001\u0011\u0001\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012"+ - "\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u008b\b\u0012\u0001\u0012"+ - "\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u0091\b\u0012\u0001\u0013"+ - "\u0001\u0013\u0001\u0013\u0001\u0013\u0003\u0013\u0097\b\u0013\u0001\u0014"+ - "\u0001\u0014\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015"+ - "\u0001\u0015\u0003\u0015\u00a1\b\u0015\u0001\u0016\u0001\u0016\u0001\u0017"+ - "\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0019"+ - "\u0001\u0019\u0001\u0019\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a"+ - "\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0000\u0000\u001c\u0001"+ - "\u0001\u0003\u0002\u0005\u0003\u0007\u0004\t\u0005\u000b\u0006\r\u0007"+ - "\u000f\b\u0011\t\u0013\n\u0015\u000b\u0017\f\u0019\r\u001b\u0000\u001d"+ - "\u0000\u001f\u0000!\u0000#\u0000%\u0000\'\u0000)\u0000+\u0000-\u0000/"+ - "\u00001\u00003\u00005\u00007\u0000\u0001\u0000\u000b\u0002\u0000AAaa\u0002"+ - "\u0000NNnn\u0002\u0000DDdd\u0002\u0000OOoo\u0002\u0000RRrr\u0002\u0000"+ - "TTtt\u0001\u0000\"\"\u0004\u0000\t\n\r\r \u3000\u3000\t\u0000 \"\"("+ - "*::<<>>\\\\{{}}\u0002\u0000UUuu\u0003\u000009AFaf\u00b9\u0000\u0001\u0001"+ - "\u0000\u0000\u0000\u0000\u0003\u0001\u0000\u0000\u0000\u0000\u0005\u0001"+ - "\u0000\u0000\u0000\u0000\u0007\u0001\u0000\u0000\u0000\u0000\t\u0001\u0000"+ - "\u0000\u0000\u0000\u000b\u0001\u0000\u0000\u0000\u0000\r\u0001\u0000\u0000"+ - "\u0000\u0000\u000f\u0001\u0000\u0000\u0000\u0000\u0011\u0001\u0000\u0000"+ - "\u0000\u0000\u0013\u0001\u0000\u0000\u0000\u0000\u0015\u0001\u0000\u0000"+ - "\u0000\u0000\u0017\u0001\u0000\u0000\u0000\u0000\u0019\u0001\u0000\u0000"+ - "\u0000\u00019\u0001\u0000\u0000\u0000\u0003=\u0001\u0000\u0000\u0000\u0005"+ - "A\u0001\u0000\u0000\u0000\u0007D\u0001\u0000\u0000\u0000\tH\u0001\u0000"+ - "\u0000\u0000\u000bN\u0001\u0000\u0000\u0000\rP\u0001\u0000\u0000\u0000"+ - "\u000fR\u0001\u0000\u0000\u0000\u0011T\u0001\u0000\u0000\u0000\u0013V"+ - "\u0001\u0000\u0000\u0000\u0015[\u0001\u0000\u0000\u0000\u0017i\u0001\u0000"+ - "\u0000\u0000\u0019s\u0001\u0000\u0000\u0000\u001bw\u0001\u0000\u0000\u0000"+ - "\u001dy\u0001\u0000\u0000\u0000\u001f{\u0001\u0000\u0000\u0000!~\u0001"+ - "\u0000\u0000\u0000#\u0080\u0001\u0000\u0000\u0000%\u0090\u0001\u0000\u0000"+ - "\u0000\'\u0096\u0001\u0000\u0000\u0000)\u0098\u0001\u0000\u0000\u0000"+ - "+\u00a0\u0001\u0000\u0000\u0000-\u00a2\u0001\u0000\u0000\u0000/\u00a4"+ - "\u0001\u0000\u0000\u00001\u00a7\u0001\u0000\u0000\u00003\u00aa\u0001\u0000"+ - "\u0000\u00005\u00ad\u0001\u0000\u0000\u00007\u00b3\u0001\u0000\u0000\u0000"+ - "9:\u0003)\u0014\u0000:;\u0001\u0000\u0000\u0000;<\u0006\u0000\u0000\u0000"+ - "<\u0002\u0001\u0000\u0000\u0000=>\u0007\u0000\u0000\u0000>?\u0007\u0001"+ - "\u0000\u0000?@\u0007\u0002\u0000\u0000@\u0004\u0001\u0000\u0000\u0000"+ - "AB\u0007\u0003\u0000\u0000BC\u0007\u0004\u0000\u0000C\u0006\u0001\u0000"+ - "\u0000\u0000DE\u0007\u0001\u0000\u0000EF\u0007\u0003\u0000\u0000FG\u0007"+ - "\u0005\u0000\u0000G\b\u0001\u0000\u0000\u0000HI\u0005:\u0000\u0000I\n"+ - "\u0001\u0000\u0000\u0000JO\u0003\u001d\u000e\u0000KO\u0003!\u0010\u0000"+ - "LO\u0003\u001f\u000f\u0000MO\u0003#\u0011\u0000NJ\u0001\u0000\u0000\u0000"+ - "NK\u0001\u0000\u0000\u0000NL\u0001\u0000\u0000\u0000NM\u0001\u0000\u0000"+ - 
"\u0000O\f\u0001\u0000\u0000\u0000PQ\u0005(\u0000\u0000Q\u000e\u0001\u0000"+ - "\u0000\u0000RS\u0005)\u0000\u0000S\u0010\u0001\u0000\u0000\u0000TU\u0005"+ - "{\u0000\u0000U\u0012\u0001\u0000\u0000\u0000VW\u0005}\u0000\u0000W\u0014"+ - "\u0001\u0000\u0000\u0000XZ\u0003\u0019\f\u0000YX\u0001\u0000\u0000\u0000"+ - "Z]\u0001\u0000\u0000\u0000[Y\u0001\u0000\u0000\u0000[\\\u0001\u0000\u0000"+ - "\u0000\\_\u0001\u0000\u0000\u0000][\u0001\u0000\u0000\u0000^`\u0003%\u0012"+ - "\u0000_^\u0001\u0000\u0000\u0000`a\u0001\u0000\u0000\u0000a_\u0001\u0000"+ - "\u0000\u0000ab\u0001\u0000\u0000\u0000bf\u0001\u0000\u0000\u0000ce\u0003"+ - "\u0019\f\u0000dc\u0001\u0000\u0000\u0000eh\u0001\u0000\u0000\u0000fd\u0001"+ - "\u0000\u0000\u0000fg\u0001\u0000\u0000\u0000g\u0016\u0001\u0000\u0000"+ - "\u0000hf\u0001\u0000\u0000\u0000im\u0005\"\u0000\u0000jl\u0003\'\u0013"+ - "\u0000kj\u0001\u0000\u0000\u0000lo\u0001\u0000\u0000\u0000mk\u0001\u0000"+ - "\u0000\u0000mn\u0001\u0000\u0000\u0000np\u0001\u0000\u0000\u0000om\u0001"+ - "\u0000\u0000\u0000pq\u0005\"\u0000\u0000q\u0018\u0001\u0000\u0000\u0000"+ - "rt\u0003\u001b\r\u0000sr\u0001\u0000\u0000\u0000tu\u0001\u0000\u0000\u0000"+ - "us\u0001\u0000\u0000\u0000uv\u0001\u0000\u0000\u0000v\u001a\u0001\u0000"+ - "\u0000\u0000wx\u0005*\u0000\u0000x\u001c\u0001\u0000\u0000\u0000yz\u0005"+ - "<\u0000\u0000z\u001e\u0001\u0000\u0000\u0000{|\u0005<\u0000\u0000|}\u0005"+ - "=\u0000\u0000} \u0001\u0000\u0000\u0000~\u007f\u0005>\u0000\u0000\u007f"+ - "\"\u0001\u0000\u0000\u0000\u0080\u0081\u0005>\u0000\u0000\u0081\u0082"+ - "\u0005=\u0000\u0000\u0082$\u0001\u0000\u0000\u0000\u0083\u0091\u0003+"+ - "\u0015\u0000\u0084\u0091\u0003/\u0017\u0000\u0085\u0091\u00033\u0019\u0000"+ - "\u0086\u008a\u0005\\\u0000\u0000\u0087\u008b\u0003\u0003\u0001\u0000\u0088"+ - "\u008b\u0003\u0005\u0002\u0000\u0089\u008b\u0003\u0007\u0003\u0000\u008a"+ - "\u0087\u0001\u0000\u0000\u0000\u008a\u0088\u0001\u0000\u0000\u0000\u008a"+ - "\u0089\u0001\u0000\u0000\u0000\u008b\u0091\u0001\u0000\u0000\u0000\u008c"+ - "\u008d\u0003\u001b\r\u0000\u008d\u008e\u0003%\u0012\u0000\u008e\u0091"+ - "\u0001\u0000\u0000\u0000\u008f\u0091\u0003-\u0016\u0000\u0090\u0083\u0001"+ - "\u0000\u0000\u0000\u0090\u0084\u0001\u0000\u0000\u0000\u0090\u0085\u0001"+ - "\u0000\u0000\u0000\u0090\u0086\u0001\u0000\u0000\u0000\u0090\u008c\u0001"+ - "\u0000\u0000\u0000\u0090\u008f\u0001\u0000\u0000\u0000\u0091&\u0001\u0000"+ - "\u0000\u0000\u0092\u0097\u0003+\u0015\u0000\u0093\u0097\u00033\u0019\u0000"+ - "\u0094\u0097\u00031\u0018\u0000\u0095\u0097\b\u0006\u0000\u0000\u0096"+ - "\u0092\u0001\u0000\u0000\u0000\u0096\u0093\u0001\u0000\u0000\u0000\u0096"+ - "\u0094\u0001\u0000\u0000\u0000\u0096\u0095\u0001\u0000\u0000\u0000\u0097"+ - "(\u0001\u0000\u0000\u0000\u0098\u0099\u0007\u0007\u0000\u0000\u0099*\u0001"+ - "\u0000\u0000\u0000\u009a\u009b\u0005\\\u0000\u0000\u009b\u00a1\u0007\u0004"+ - "\u0000\u0000\u009c\u009d\u0005\\\u0000\u0000\u009d\u00a1\u0007\u0005\u0000"+ - "\u0000\u009e\u009f\u0005\\\u0000\u0000\u009f\u00a1\u0007\u0001\u0000\u0000"+ - "\u00a0\u009a\u0001\u0000\u0000\u0000\u00a0\u009c\u0001\u0000\u0000\u0000"+ - "\u00a0\u009e\u0001\u0000\u0000\u0000\u00a1,\u0001\u0000\u0000\u0000\u00a2"+ - "\u00a3\b\b\u0000\u0000\u00a3.\u0001\u0000\u0000\u0000\u00a4\u00a5\u0005"+ - "\\\u0000\u0000\u00a5\u00a6\u0007\b\u0000\u0000\u00a60\u0001\u0000\u0000"+ - "\u0000\u00a7\u00a8\u0005\\\u0000\u0000\u00a8\u00a9\u0005\"\u0000\u0000"+ - "\u00a92\u0001\u0000\u0000\u0000\u00aa\u00ab\u0005\\\u0000\u0000\u00ab"+ - 
"\u00ac\u00035\u001a\u0000\u00ac4\u0001\u0000\u0000\u0000\u00ad\u00ae\u0007"+ - "\t\u0000\u0000\u00ae\u00af\u00037\u001b\u0000\u00af\u00b0\u00037\u001b"+ - "\u0000\u00b0\u00b1\u00037\u001b\u0000\u00b1\u00b2\u00037\u001b\u0000\u00b2"+ - "6\u0001\u0000\u0000\u0000\u00b3\u00b4\u0007\n\u0000\u0000\u00b48\u0001"+ - "\u0000\u0000\u0000\u000b\u0000N[afmu\u008a\u0090\u0096\u00a0\u0001\u0006"+ - "\u0000\u0000"; + "\u0004\u0000\u0010\u00b2\u0006\uffff\uffff\u0002\u0000\u0007\u0000\u0002"+ + "\u0001\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002"+ + "\u0004\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002"+ + "\u0007\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002"+ + "\u000b\u0007\u000b\u0002\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e"+ + "\u0002\u000f\u0007\u000f\u0002\u0010\u0007\u0010\u0002\u0011\u0007\u0011"+ + "\u0002\u0012\u0007\u0012\u0002\u0013\u0007\u0013\u0002\u0014\u0007\u0014"+ + "\u0002\u0015\u0007\u0015\u0002\u0016\u0007\u0016\u0002\u0017\u0007\u0017"+ + "\u0002\u0018\u0007\u0018\u0002\u0019\u0007\u0019\u0002\u001a\u0007\u001a"+ + "\u0002\u001b\u0007\u001b\u0001\u0000\u0001\u0000\u0001\u0000\u0001\u0000"+ + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002"+ + "\u0001\u0002\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004"+ + "\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006"+ + "\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\b\u0001\t\u0001\t\u0001"+ + "\n\u0001\n\u0001\u000b\u0001\u000b\u0001\f\u0001\f\u0001\r\u0004\r^\b"+ + "\r\u000b\r\f\r_\u0001\u000e\u0001\u000e\u0005\u000ed\b\u000e\n\u000e\f"+ + "\u000eg\t\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001\u000f\u0001"+ + "\u0010\u0001\u0010\u0001\u0011\u0005\u0011p\b\u0011\n\u0011\f\u0011s\t"+ + "\u0011\u0001\u0011\u0001\u0011\u0005\u0011w\b\u0011\n\u0011\f\u0011z\t"+ + "\u0011\u0001\u0011\u0001\u0011\u0004\u0011~\b\u0011\u000b\u0011\f\u0011"+ + "\u007f\u0003\u0011\u0082\b\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001"+ + "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u008b\b\u0012\u0001"+ + "\u0012\u0003\u0012\u008e\b\u0012\u0001\u0013\u0001\u0013\u0001\u0013\u0001"+ + "\u0013\u0003\u0013\u0094\b\u0013\u0001\u0014\u0001\u0014\u0001\u0015\u0001"+ + "\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0003\u0015\u009e"+ + "\b\u0015\u0001\u0016\u0001\u0016\u0001\u0017\u0001\u0017\u0001\u0017\u0001"+ + "\u0018\u0001\u0018\u0001\u0018\u0001\u0019\u0001\u0019\u0001\u0019\u0001"+ + "\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001"+ + "\u001b\u0001\u001b\u0000\u0000\u001c\u0001\u0001\u0003\u0002\u0005\u0003"+ + "\u0007\u0004\t\u0005\u000b\u0006\r\u0007\u000f\b\u0011\t\u0013\n\u0015"+ + "\u000b\u0017\f\u0019\r\u001b\u000e\u001d\u000f\u001f\u0010!\u0000#\u0000"+ + "%\u0000\'\u0000)\u0000+\u0000-\u0000/\u00001\u00003\u00005\u00007\u0000"+ + "\u0001\u0000\f\u0002\u0000AAaa\u0002\u0000NNnn\u0002\u0000DDdd\u0002\u0000"+ + "OOoo\u0002\u0000RRrr\u0002\u0000TTtt\u0001\u0000\"\"\u0004\u0000\t\n\r"+ + "\r \u3000\u3000\f\u0000\t\n\r\r \"\"(*::<<>>\\\\{{}}\u3000\u3000\t\u0000"+ + " \"\"(*::<<>>\\\\{{}}\u0002\u0000UUuu\u0003\u000009AFaf\u00b6\u0000\u0001"+ + "\u0001\u0000\u0000\u0000\u0000\u0003\u0001\u0000\u0000\u0000\u0000\u0005"+ + "\u0001\u0000\u0000\u0000\u0000\u0007\u0001\u0000\u0000\u0000\u0000\t\u0001"+ + "\u0000\u0000\u0000\u0000\u000b\u0001\u0000\u0000\u0000\u0000\r\u0001\u0000"+ + "\u0000\u0000\u0000\u000f\u0001\u0000\u0000\u0000\u0000\u0011\u0001\u0000"+ + 
"\u0000\u0000\u0000\u0013\u0001\u0000\u0000\u0000\u0000\u0015\u0001\u0000"+ + "\u0000\u0000\u0000\u0017\u0001\u0000\u0000\u0000\u0000\u0019\u0001\u0000"+ + "\u0000\u0000\u0000\u001b\u0001\u0000\u0000\u0000\u0000\u001d\u0001\u0000"+ + "\u0000\u0000\u0000\u001f\u0001\u0000\u0000\u0000\u00019\u0001\u0000\u0000"+ + "\u0000\u0003=\u0001\u0000\u0000\u0000\u0005A\u0001\u0000\u0000\u0000\u0007"+ + "D\u0001\u0000\u0000\u0000\tH\u0001\u0000\u0000\u0000\u000bJ\u0001\u0000"+ + "\u0000\u0000\rL\u0001\u0000\u0000\u0000\u000fO\u0001\u0000\u0000\u0000"+ + "\u0011Q\u0001\u0000\u0000\u0000\u0013T\u0001\u0000\u0000\u0000\u0015V"+ + "\u0001\u0000\u0000\u0000\u0017X\u0001\u0000\u0000\u0000\u0019Z\u0001\u0000"+ + "\u0000\u0000\u001b]\u0001\u0000\u0000\u0000\u001da\u0001\u0000\u0000\u0000"+ + "\u001fj\u0001\u0000\u0000\u0000!l\u0001\u0000\u0000\u0000#\u0081\u0001"+ + "\u0000\u0000\u0000%\u008d\u0001\u0000\u0000\u0000\'\u0093\u0001\u0000"+ + "\u0000\u0000)\u0095\u0001\u0000\u0000\u0000+\u009d\u0001\u0000\u0000\u0000"+ + "-\u009f\u0001\u0000\u0000\u0000/\u00a1\u0001\u0000\u0000\u00001\u00a4"+ + "\u0001\u0000\u0000\u00003\u00a7\u0001\u0000\u0000\u00005\u00aa\u0001\u0000"+ + "\u0000\u00007\u00b0\u0001\u0000\u0000\u00009:\u0003)\u0014\u0000:;\u0001"+ + "\u0000\u0000\u0000;<\u0006\u0000\u0000\u0000<\u0002\u0001\u0000\u0000"+ + "\u0000=>\u0007\u0000\u0000\u0000>?\u0007\u0001\u0000\u0000?@\u0007\u0002"+ + "\u0000\u0000@\u0004\u0001\u0000\u0000\u0000AB\u0007\u0003\u0000\u0000"+ + "BC\u0007\u0004\u0000\u0000C\u0006\u0001\u0000\u0000\u0000DE\u0007\u0001"+ + "\u0000\u0000EF\u0007\u0003\u0000\u0000FG\u0007\u0005\u0000\u0000G\b\u0001"+ + "\u0000\u0000\u0000HI\u0005:\u0000\u0000I\n\u0001\u0000\u0000\u0000JK\u0005"+ + "<\u0000\u0000K\f\u0001\u0000\u0000\u0000LM\u0005<\u0000\u0000MN\u0005"+ + "=\u0000\u0000N\u000e\u0001\u0000\u0000\u0000OP\u0005>\u0000\u0000P\u0010"+ + "\u0001\u0000\u0000\u0000QR\u0005>\u0000\u0000RS\u0005=\u0000\u0000S\u0012"+ + "\u0001\u0000\u0000\u0000TU\u0005(\u0000\u0000U\u0014\u0001\u0000\u0000"+ + "\u0000VW\u0005)\u0000\u0000W\u0016\u0001\u0000\u0000\u0000XY\u0005{\u0000"+ + "\u0000Y\u0018\u0001\u0000\u0000\u0000Z[\u0005}\u0000\u0000[\u001a\u0001"+ + "\u0000\u0000\u0000\\^\u0003#\u0011\u0000]\\\u0001\u0000\u0000\u0000^_"+ + "\u0001\u0000\u0000\u0000_]\u0001\u0000\u0000\u0000_`\u0001\u0000\u0000"+ + "\u0000`\u001c\u0001\u0000\u0000\u0000ae\u0005\"\u0000\u0000bd\u0003\'"+ + "\u0013\u0000cb\u0001\u0000\u0000\u0000dg\u0001\u0000\u0000\u0000ec\u0001"+ + "\u0000\u0000\u0000ef\u0001\u0000\u0000\u0000fh\u0001\u0000\u0000\u0000"+ + "ge\u0001\u0000\u0000\u0000hi\u0005\"\u0000\u0000i\u001e\u0001\u0000\u0000"+ + "\u0000jk\u0003!\u0010\u0000k \u0001\u0000\u0000\u0000lm\u0005*\u0000\u0000"+ + "m\"\u0001\u0000\u0000\u0000np\u0003!\u0010\u0000on\u0001\u0000\u0000\u0000"+ + "ps\u0001\u0000\u0000\u0000qo\u0001\u0000\u0000\u0000qr\u0001\u0000\u0000"+ + "\u0000rt\u0001\u0000\u0000\u0000sq\u0001\u0000\u0000\u0000tx\u0003%\u0012"+ + "\u0000uw\u0003!\u0010\u0000vu\u0001\u0000\u0000\u0000wz\u0001\u0000\u0000"+ + "\u0000xv\u0001\u0000\u0000\u0000xy\u0001\u0000\u0000\u0000y\u0082\u0001"+ + "\u0000\u0000\u0000zx\u0001\u0000\u0000\u0000{}\u0003!\u0010\u0000|~\u0003"+ + "!\u0010\u0000}|\u0001\u0000\u0000\u0000~\u007f\u0001\u0000\u0000\u0000"+ + "\u007f}\u0001\u0000\u0000\u0000\u007f\u0080\u0001\u0000\u0000\u0000\u0080"+ + "\u0082\u0001\u0000\u0000\u0000\u0081q\u0001\u0000\u0000\u0000\u0081{\u0001"+ + "\u0000\u0000\u0000\u0082$\u0001\u0000\u0000\u0000\u0083\u008e\u0003+\u0015"+ + 
"\u0000\u0084\u008e\u0003/\u0017\u0000\u0085\u008e\u00033\u0019\u0000\u0086"+ + "\u008a\u0005\\\u0000\u0000\u0087\u008b\u0003\u0003\u0001\u0000\u0088\u008b"+ + "\u0003\u0005\u0002\u0000\u0089\u008b\u0003\u0007\u0003\u0000\u008a\u0087"+ + "\u0001\u0000\u0000\u0000\u008a\u0088\u0001\u0000\u0000\u0000\u008a\u0089"+ + "\u0001\u0000\u0000\u0000\u008b\u008e\u0001\u0000\u0000\u0000\u008c\u008e"+ + "\u0003-\u0016\u0000\u008d\u0083\u0001\u0000\u0000\u0000\u008d\u0084\u0001"+ + "\u0000\u0000\u0000\u008d\u0085\u0001\u0000\u0000\u0000\u008d\u0086\u0001"+ + "\u0000\u0000\u0000\u008d\u008c\u0001\u0000\u0000\u0000\u008e&\u0001\u0000"+ + "\u0000\u0000\u008f\u0094\u0003+\u0015\u0000\u0090\u0094\u00033\u0019\u0000"+ + "\u0091\u0094\u00031\u0018\u0000\u0092\u0094\b\u0006\u0000\u0000\u0093"+ + "\u008f\u0001\u0000\u0000\u0000\u0093\u0090\u0001\u0000\u0000\u0000\u0093"+ + "\u0091\u0001\u0000\u0000\u0000\u0093\u0092\u0001\u0000\u0000\u0000\u0094"+ + "(\u0001\u0000\u0000\u0000\u0095\u0096\u0007\u0007\u0000\u0000\u0096*\u0001"+ + "\u0000\u0000\u0000\u0097\u0098\u0005\\\u0000\u0000\u0098\u009e\u0007\u0004"+ + "\u0000\u0000\u0099\u009a\u0005\\\u0000\u0000\u009a\u009e\u0007\u0005\u0000"+ + "\u0000\u009b\u009c\u0005\\\u0000\u0000\u009c\u009e\u0007\u0001\u0000\u0000"+ + "\u009d\u0097\u0001\u0000\u0000\u0000\u009d\u0099\u0001\u0000\u0000\u0000"+ + "\u009d\u009b\u0001\u0000\u0000\u0000\u009e,\u0001\u0000\u0000\u0000\u009f"+ + "\u00a0\b\b\u0000\u0000\u00a0.\u0001\u0000\u0000\u0000\u00a1\u00a2\u0005"+ + "\\\u0000\u0000\u00a2\u00a3\u0007\t\u0000\u0000\u00a30\u0001\u0000\u0000"+ + "\u0000\u00a4\u00a5\u0005\\\u0000\u0000\u00a5\u00a6\u0005\"\u0000\u0000"+ + "\u00a62\u0001\u0000\u0000\u0000\u00a7\u00a8\u0005\\\u0000\u0000\u00a8"+ + "\u00a9\u00035\u001a\u0000\u00a94\u0001\u0000\u0000\u0000\u00aa\u00ab\u0007"+ + "\n\u0000\u0000\u00ab\u00ac\u00037\u001b\u0000\u00ac\u00ad\u00037\u001b"+ + "\u0000\u00ad\u00ae\u00037\u001b\u0000\u00ae\u00af\u00037\u001b\u0000\u00af"+ + "6\u0001\u0000\u0000\u0000\u00b0\u00b1\u0007\u000b\u0000\u0000\u00b18\u0001"+ + "\u0000\u0000\u0000\u000b\u0000_eqx\u007f\u0081\u008a\u008d\u0093\u009d"+ + "\u0001\u0006\u0000\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java index bce2044fa8175..505569dbde58d 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseListener.java @@ -71,16 +71,6 @@ interface KqlBaseListener extends ParseTreeListener { * @param ctx the parse tree */ void exitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx); - /** - * Enter a parse tree produced by {@link KqlBaseParser#expression}. - * @param ctx the parse tree - */ - void enterExpression(KqlBaseParser.ExpressionContext ctx); - /** - * Exit a parse tree produced by {@link KqlBaseParser#expression}. - * @param ctx the parse tree - */ - void exitExpression(KqlBaseParser.ExpressionContext ctx); /** * Enter a parse tree produced by {@link KqlBaseParser#nestedQuery}. * @param ctx the parse tree @@ -92,45 +82,35 @@ interface KqlBaseListener extends ParseTreeListener { */ void exitNestedQuery(KqlBaseParser.NestedQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. 
- * @param ctx the parse tree - */ - void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); - /** - * Exit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. - * @param ctx the parse tree - */ - void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); - /** - * Enter a parse tree produced by {@link KqlBaseParser#fieldRangeQuery}. + * Enter a parse tree produced by {@link KqlBaseParser#matchAllQuery}. * @param ctx the parse tree */ - void enterFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx); + void enterMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#fieldRangeQuery}. + * Exit a parse tree produced by {@link KqlBaseParser#matchAllQuery}. * @param ctx the parse tree */ - void exitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx); + void exitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#fieldTermQuery}. + * Enter a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. * @param ctx the parse tree */ - void enterFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx); + void enterParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#fieldTermQuery}. + * Exit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. * @param ctx the parse tree */ - void exitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx); + void exitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#fieldName}. + * Enter a parse tree produced by {@link KqlBaseParser#rangeQuery}. * @param ctx the parse tree */ - void enterFieldName(KqlBaseParser.FieldNameContext ctx); + void enterRangeQuery(KqlBaseParser.RangeQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#fieldName}. + * Exit a parse tree produced by {@link KqlBaseParser#rangeQuery}. * @param ctx the parse tree */ - void exitFieldName(KqlBaseParser.FieldNameContext ctx); + void exitRangeQuery(KqlBaseParser.RangeQueryContext ctx); /** * Enter a parse tree produced by {@link KqlBaseParser#rangeQueryValue}. * @param ctx the parse tree @@ -142,53 +122,53 @@ interface KqlBaseListener extends ParseTreeListener { */ void exitRangeQueryValue(KqlBaseParser.RangeQueryValueContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#termQueryValue}. + * Enter a parse tree produced by {@link KqlBaseParser#existsQuery}. * @param ctx the parse tree */ - void enterTermQueryValue(KqlBaseParser.TermQueryValueContext ctx); + void enterExistsQuery(KqlBaseParser.ExistsQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#termQueryValue}. + * Exit a parse tree produced by {@link KqlBaseParser#existsQuery}. * @param ctx the parse tree */ - void exitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx); + void exitExistsQuery(KqlBaseParser.ExistsQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#groupingTermExpression}. + * Enter a parse tree produced by {@link KqlBaseParser#fieldQuery}. * @param ctx the parse tree */ - void enterGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx); + void enterFieldQuery(KqlBaseParser.FieldQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#groupingTermExpression}. + * Exit a parse tree produced by {@link KqlBaseParser#fieldQuery}. 
* @param ctx the parse tree */ - void exitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx); + void exitFieldQuery(KqlBaseParser.FieldQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#unquotedLiteralExpression}. + * Enter a parse tree produced by {@link KqlBaseParser#fieldLessQuery}. * @param ctx the parse tree */ - void enterUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx); + void enterFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#unquotedLiteralExpression}. + * Exit a parse tree produced by {@link KqlBaseParser#fieldLessQuery}. * @param ctx the parse tree */ - void exitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx); + void exitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#quotedStringExpression}. + * Enter a parse tree produced by {@link KqlBaseParser#fieldQueryValue}. * @param ctx the parse tree */ - void enterQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx); + void enterFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#quotedStringExpression}. + * Exit a parse tree produced by {@link KqlBaseParser#fieldQueryValue}. * @param ctx the parse tree */ - void exitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx); + void exitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx); /** - * Enter a parse tree produced by {@link KqlBaseParser#wildcardExpression}. + * Enter a parse tree produced by {@link KqlBaseParser#fieldName}. * @param ctx the parse tree */ - void enterWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx); + void enterFieldName(KqlBaseParser.FieldNameContext ctx); /** - * Exit a parse tree produced by {@link KqlBaseParser#wildcardExpression}. + * Exit a parse tree produced by {@link KqlBaseParser#fieldName}. 
* @param ctx the parse tree */ - void exitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx); + void exitFieldName(KqlBaseParser.FieldNameContext ctx); } diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java index 3bd9cc4104d2c..3ee44e389a371 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseParser.java @@ -25,37 +25,35 @@ class KqlBaseParser extends Parser { protected static final PredictionContextCache _sharedContextCache = new PredictionContextCache(); public static final int - DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_COMPARE=6, LEFT_PARENTHESIS=7, - RIGHT_PARENTHESIS=8, LEFT_CURLY_BRACKET=9, RIGHT_CURLY_BRACKET=10, UNQUOTED_LITERAL=11, - QUOTED_STRING=12, WILDCARD=13; + DEFAULT_SKIP=1, AND=2, OR=3, NOT=4, COLON=5, OP_LESS=6, OP_LESS_EQ=7, + OP_MORE=8, OP_MORE_EQ=9, LEFT_PARENTHESIS=10, RIGHT_PARENTHESIS=11, LEFT_CURLY_BRACKET=12, + RIGHT_CURLY_BRACKET=13, UNQUOTED_LITERAL=14, QUOTED_STRING=15, WILDCARD=16; public static final int - RULE_topLevelQuery = 0, RULE_query = 1, RULE_simpleQuery = 2, RULE_expression = 3, - RULE_nestedQuery = 4, RULE_parenthesizedQuery = 5, RULE_fieldRangeQuery = 6, - RULE_fieldTermQuery = 7, RULE_fieldName = 8, RULE_rangeQueryValue = 9, - RULE_termQueryValue = 10, RULE_groupingTermExpression = 11, RULE_unquotedLiteralExpression = 12, - RULE_quotedStringExpression = 13, RULE_wildcardExpression = 14; + RULE_topLevelQuery = 0, RULE_query = 1, RULE_simpleQuery = 2, RULE_nestedQuery = 3, + RULE_matchAllQuery = 4, RULE_parenthesizedQuery = 5, RULE_rangeQuery = 6, + RULE_rangeQueryValue = 7, RULE_existsQuery = 8, RULE_fieldQuery = 9, RULE_fieldLessQuery = 10, + RULE_fieldQueryValue = 11, RULE_fieldName = 12; private static String[] makeRuleNames() { return new String[] { - "topLevelQuery", "query", "simpleQuery", "expression", "nestedQuery", - "parenthesizedQuery", "fieldRangeQuery", "fieldTermQuery", "fieldName", - "rangeQueryValue", "termQueryValue", "groupingTermExpression", "unquotedLiteralExpression", - "quotedStringExpression", "wildcardExpression" + "topLevelQuery", "query", "simpleQuery", "nestedQuery", "matchAllQuery", + "parenthesizedQuery", "rangeQuery", "rangeQueryValue", "existsQuery", + "fieldQuery", "fieldLessQuery", "fieldQueryValue", "fieldName" }; } public static final String[] ruleNames = makeRuleNames(); private static String[] makeLiteralNames() { return new String[] { - null, null, "'and'", "'or'", "'not'", "':'", null, "'('", "')'", "'{'", - "'}'" + null, null, "'and'", "'or'", "'not'", "':'", "'<'", "'<='", "'>'", "'>='", + "'('", "')'", "'{'", "'}'" }; } private static final String[] _LITERAL_NAMES = makeLiteralNames(); private static String[] makeSymbolicNames() { return new String[] { - null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_COMPARE", "LEFT_PARENTHESIS", - "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", - "QUOTED_STRING", "WILDCARD" + null, "DEFAULT_SKIP", "AND", "OR", "NOT", "COLON", "OP_LESS", "OP_LESS_EQ", + "OP_MORE", "OP_MORE_EQ", "LEFT_PARENTHESIS", "RIGHT_PARENTHESIS", "LEFT_CURLY_BRACKET", + "RIGHT_CURLY_BRACKET", "UNQUOTED_LITERAL", "QUOTED_STRING", "WILDCARD" }; } private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames(); @@ -141,17 +139,17 @@ public final TopLevelQueryContext 
topLevelQuery() throws RecognitionException { try { enterOuterAlt(_localctx, 1); { - setState(31); + setState(27); _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 14480L) != 0)) { + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & 115740L) != 0)) { { - setState(30); + setState(26); query(0); } } - setState(33); + setState(29); match(EOF); } } @@ -202,6 +200,7 @@ public T accept(ParseTreeVisitor visitor) { } @SuppressWarnings("CheckReturnValue") public static class BooleanQueryContext extends QueryContext { + public Token operator; public List query() { return getRuleContexts(QueryContext.class); } @@ -262,38 +261,33 @@ private QueryContext query(int _p) throws RecognitionException { int _alt; enterOuterAlt(_localctx, 1); { - setState(39); + setState(35); _errHandler.sync(this); - switch (_input.LA(1)) { - case NOT: + switch ( getInterpreter().adaptivePredict(_input,1,_ctx) ) { + case 1: { _localctx = new NotQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(36); + setState(32); match(NOT); - setState(37); + setState(33); ((NotQueryContext)_localctx).subQuery = simpleQuery(); } break; - case LEFT_PARENTHESIS: - case UNQUOTED_LITERAL: - case QUOTED_STRING: - case WILDCARD: + case 2: { _localctx = new DefaultQueryContext(_localctx); _ctx = _localctx; _prevctx = _localctx; - setState(38); + setState(34); simpleQuery(); } break; - default: - throw new NoViableAltException(this); } _ctx.stop = _input.LT(-1); - setState(46); + setState(42); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,2,_ctx); while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { @@ -304,24 +298,25 @@ private QueryContext query(int _p) throws RecognitionException { { _localctx = new BooleanQueryContext(new QueryContext(_parentctx, _parentState)); pushNewRecursionContext(_localctx, _startState, RULE_query); - setState(41); + setState(37); if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); - setState(42); + setState(38); + ((BooleanQueryContext)_localctx).operator = _input.LT(1); _la = _input.LA(1); if ( !(_la==AND || _la==OR) ) { - _errHandler.recoverInline(this); + ((BooleanQueryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); } else { if ( _input.LA(1)==Token.EOF ) matchedEOF = true; _errHandler.reportMatch(this); consume(); } - setState(43); - query(4); + setState(39); + query(3); } } } - setState(48); + setState(44); _errHandler.sync(this); _alt = getInterpreter().adaptivePredict(_input,2,_ctx); } @@ -343,12 +338,24 @@ public static class SimpleQueryContext extends ParserRuleContext { public NestedQueryContext nestedQuery() { return getRuleContext(NestedQueryContext.class,0); } - public ExpressionContext expression() { - return getRuleContext(ExpressionContext.class,0); - } public ParenthesizedQueryContext parenthesizedQuery() { return getRuleContext(ParenthesizedQueryContext.class,0); } + public MatchAllQueryContext matchAllQuery() { + return getRuleContext(MatchAllQueryContext.class,0); + } + public ExistsQueryContext existsQuery() { + return getRuleContext(ExistsQueryContext.class,0); + } + public RangeQueryContext rangeQuery() { + return getRuleContext(RangeQueryContext.class,0); + } + public FieldQueryContext fieldQuery() { + return getRuleContext(FieldQueryContext.class,0); + } + public FieldLessQueryContext fieldLessQuery() { + return getRuleContext(FieldLessQueryContext.class,0); + } public SimpleQueryContext(ParserRuleContext parent, int 
invokingState) { super(parent, invokingState); } @@ -378,83 +385,50 @@ public final SimpleQueryContext simpleQuery() throws RecognitionException { case 1: enterOuterAlt(_localctx, 1); { - setState(49); + setState(45); nestedQuery(); } break; case 2: enterOuterAlt(_localctx, 2); { - setState(50); - expression(); + setState(46); + parenthesizedQuery(); } break; case 3: enterOuterAlt(_localctx, 3); { - setState(51); - parenthesizedQuery(); + setState(47); + matchAllQuery(); } break; - } - } - catch (RecognitionException re) { - _localctx.exception = re; - _errHandler.reportError(this, re); - _errHandler.recover(this, re); - } - finally { - exitRule(); - } - return _localctx; - } - - @SuppressWarnings("CheckReturnValue") - public static class ExpressionContext extends ParserRuleContext { - public FieldTermQueryContext fieldTermQuery() { - return getRuleContext(FieldTermQueryContext.class,0); - } - public FieldRangeQueryContext fieldRangeQuery() { - return getRuleContext(FieldRangeQueryContext.class,0); - } - public ExpressionContext(ParserRuleContext parent, int invokingState) { - super(parent, invokingState); - } - @Override public int getRuleIndex() { return RULE_expression; } - @Override - public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterExpression(this); - } - @Override - public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitExpression(this); - } - @Override - public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitExpression(this); - else return visitor.visitChildren(this); - } - } - - public final ExpressionContext expression() throws RecognitionException { - ExpressionContext _localctx = new ExpressionContext(_ctx, getState()); - enterRule(_localctx, 6, RULE_expression); - try { - setState(56); - _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { - case 1: - enterOuterAlt(_localctx, 1); + case 4: + enterOuterAlt(_localctx, 4); { - setState(54); - fieldTermQuery(); + setState(48); + existsQuery(); } break; - case 2: - enterOuterAlt(_localctx, 2); + case 5: + enterOuterAlt(_localctx, 5); { - setState(55); - fieldRangeQuery(); + setState(49); + rangeQuery(); + } + break; + case 6: + enterOuterAlt(_localctx, 6); + { + setState(50); + fieldQuery(); + } + break; + case 7: + enterOuterAlt(_localctx, 7); + { + setState(51); + fieldLessQuery(); } break; } @@ -502,19 +476,19 @@ public T accept(ParseTreeVisitor visitor) { public final NestedQueryContext nestedQuery() throws RecognitionException { NestedQueryContext _localctx = new NestedQueryContext(_ctx, getState()); - enterRule(_localctx, 8, RULE_nestedQuery); + enterRule(_localctx, 6, RULE_nestedQuery); try { enterOuterAlt(_localctx, 1); { - setState(58); + setState(54); fieldName(); - setState(59); + setState(55); match(COLON); - setState(60); + setState(56); match(LEFT_CURLY_BRACKET); - setState(61); + setState(57); query(0); - setState(62); + setState(58); match(RIGHT_CURLY_BRACKET); } } @@ -530,43 +504,51 @@ public final NestedQueryContext nestedQuery() throws RecognitionException { } @SuppressWarnings("CheckReturnValue") - public static class ParenthesizedQueryContext extends ParserRuleContext { - public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } - public QueryContext query() { - return getRuleContext(QueryContext.class,0); + public static 
class MatchAllQueryContext extends ParserRuleContext { + public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } + public TerminalNode WILDCARD(int i) { + return getToken(KqlBaseParser.WILDCARD, i); } - public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } - public ParenthesizedQueryContext(ParserRuleContext parent, int invokingState) { + public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } + public MatchAllQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_parenthesizedQuery; } + @Override public int getRuleIndex() { return RULE_matchAllQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterParenthesizedQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterMatchAllQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitParenthesizedQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitMatchAllQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitParenthesizedQuery(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitMatchAllQuery(this); else return visitor.visitChildren(this); } } - public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionException { - ParenthesizedQueryContext _localctx = new ParenthesizedQueryContext(_ctx, getState()); - enterRule(_localctx, 10, RULE_parenthesizedQuery); + public final MatchAllQueryContext matchAllQuery() throws RecognitionException { + MatchAllQueryContext _localctx = new MatchAllQueryContext(_ctx, getState()); + enterRule(_localctx, 8, RULE_matchAllQuery); try { enterOuterAlt(_localctx, 1); { + setState(62); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) { + case 1: + { + setState(60); + match(WILDCARD); + setState(61); + match(COLON); + } + break; + } setState(64); - match(LEFT_PARENTHESIS); - setState(65); - query(0); - setState(66); - match(RIGHT_PARENTHESIS); + match(WILDCARD); } } catch (RecognitionException re) { @@ -581,46 +563,43 @@ public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionEx } @SuppressWarnings("CheckReturnValue") - public static class FieldRangeQueryContext extends ParserRuleContext { - public Token operator; - public FieldNameContext fieldName() { - return getRuleContext(FieldNameContext.class,0); - } - public RangeQueryValueContext rangeQueryValue() { - return getRuleContext(RangeQueryValueContext.class,0); + public static class ParenthesizedQueryContext extends ParserRuleContext { + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } + public QueryContext query() { + return getRuleContext(QueryContext.class,0); } - public TerminalNode OP_COMPARE() { return getToken(KqlBaseParser.OP_COMPARE, 0); } - public FieldRangeQueryContext(ParserRuleContext parent, int invokingState) { + public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } + public ParenthesizedQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return 
RULE_fieldRangeQuery; } + @Override public int getRuleIndex() { return RULE_parenthesizedQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldRangeQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterParenthesizedQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldRangeQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitParenthesizedQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldRangeQuery(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitParenthesizedQuery(this); else return visitor.visitChildren(this); } } - public final FieldRangeQueryContext fieldRangeQuery() throws RecognitionException { - FieldRangeQueryContext _localctx = new FieldRangeQueryContext(_ctx, getState()); - enterRule(_localctx, 12, RULE_fieldRangeQuery); + public final ParenthesizedQueryContext parenthesizedQuery() throws RecognitionException { + ParenthesizedQueryContext _localctx = new ParenthesizedQueryContext(_ctx, getState()); + enterRule(_localctx, 10, RULE_parenthesizedQuery); try { enterOuterAlt(_localctx, 1); { + setState(66); + match(LEFT_PARENTHESIS); + setState(67); + query(0); setState(68); - fieldName(); - setState(69); - ((FieldRangeQueryContext)_localctx).operator = match(OP_COMPARE); - setState(70); - rangeQueryValue(); + match(RIGHT_PARENTHESIS); } } catch (RecognitionException re) { @@ -635,53 +614,59 @@ public final FieldRangeQueryContext fieldRangeQuery() throws RecognitionExceptio } @SuppressWarnings("CheckReturnValue") - public static class FieldTermQueryContext extends ParserRuleContext { - public TermQueryValueContext termQueryValue() { - return getRuleContext(TermQueryValueContext.class,0); - } + public static class RangeQueryContext extends ParserRuleContext { + public Token operator; public FieldNameContext fieldName() { return getRuleContext(FieldNameContext.class,0); } - public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } - public FieldTermQueryContext(ParserRuleContext parent, int invokingState) { + public RangeQueryValueContext rangeQueryValue() { + return getRuleContext(RangeQueryValueContext.class,0); + } + public TerminalNode OP_LESS() { return getToken(KqlBaseParser.OP_LESS, 0); } + public TerminalNode OP_LESS_EQ() { return getToken(KqlBaseParser.OP_LESS_EQ, 0); } + public TerminalNode OP_MORE() { return getToken(KqlBaseParser.OP_MORE, 0); } + public TerminalNode OP_MORE_EQ() { return getToken(KqlBaseParser.OP_MORE_EQ, 0); } + public RangeQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_fieldTermQuery; } + @Override public int getRuleIndex() { return RULE_rangeQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldTermQuery(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterRangeQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldTermQuery(this); + if ( listener instanceof KqlBaseListener ) 
((KqlBaseListener)listener).exitRangeQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldTermQuery(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitRangeQuery(this); else return visitor.visitChildren(this); } } - public final FieldTermQueryContext fieldTermQuery() throws RecognitionException { - FieldTermQueryContext _localctx = new FieldTermQueryContext(_ctx, getState()); - enterRule(_localctx, 14, RULE_fieldTermQuery); + public final RangeQueryContext rangeQuery() throws RecognitionException { + RangeQueryContext _localctx = new RangeQueryContext(_ctx, getState()); + enterRule(_localctx, 12, RULE_rangeQuery); + int _la; try { enterOuterAlt(_localctx, 1); { - setState(75); - _errHandler.sync(this); - switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { - case 1: - { - setState(72); - fieldName(); - setState(73); - match(COLON); - } - break; + setState(70); + fieldName(); + setState(71); + ((RangeQueryContext)_localctx).operator = _input.LT(1); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 960L) != 0)) ) { + ((RangeQueryContext)_localctx).operator = (Token)_errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); } - setState(77); - termQueryValue(); + setState(72); + rangeQueryValue(); } } catch (RecognitionException re) { @@ -696,61 +681,83 @@ public final FieldTermQueryContext fieldTermQuery() throws RecognitionException } @SuppressWarnings("CheckReturnValue") - public static class FieldNameContext extends ParserRuleContext { - public WildcardExpressionContext wildcardExpression() { - return getRuleContext(WildcardExpressionContext.class,0); - } - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); + public static class RangeQueryValueContext extends ParserRuleContext { + public List UNQUOTED_LITERAL() { return getTokens(KqlBaseParser.UNQUOTED_LITERAL); } + public TerminalNode UNQUOTED_LITERAL(int i) { + return getToken(KqlBaseParser.UNQUOTED_LITERAL, i); } - public QuotedStringExpressionContext quotedStringExpression() { - return getRuleContext(QuotedStringExpressionContext.class,0); + public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } + public TerminalNode WILDCARD(int i) { + return getToken(KqlBaseParser.WILDCARD, i); } - public FieldNameContext(ParserRuleContext parent, int invokingState) { + public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } + public RangeQueryValueContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_fieldName; } + @Override public int getRuleIndex() { return RULE_rangeQueryValue; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldName(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterRangeQueryValue(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldName(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitRangeQueryValue(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) 
return ((KqlBaseVisitor)visitor).visitFieldName(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitRangeQueryValue(this); else return visitor.visitChildren(this); } } - public final FieldNameContext fieldName() throws RecognitionException { - FieldNameContext _localctx = new FieldNameContext(_ctx, getState()); - enterRule(_localctx, 16, RULE_fieldName); + public final RangeQueryValueContext rangeQueryValue() throws RecognitionException { + RangeQueryValueContext _localctx = new RangeQueryValueContext(_ctx, getState()); + enterRule(_localctx, 14, RULE_rangeQueryValue); + int _la; try { - setState(82); + int _alt; + setState(80); _errHandler.sync(this); switch (_input.LA(1)) { + case UNQUOTED_LITERAL: case WILDCARD: enterOuterAlt(_localctx, 1); { - setState(79); - wildcardExpression(); - } - break; - case UNQUOTED_LITERAL: - enterOuterAlt(_localctx, 2); - { - setState(80); - unquotedLiteralExpression(); + setState(75); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(74); + _la = _input.LA(1); + if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(77); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,5,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); } break; case QUOTED_STRING: - enterOuterAlt(_localctx, 3); + enterOuterAlt(_localctx, 2); { - setState(81); - quotedStringExpression(); + setState(79); + match(QUOTED_STRING); } break; default: @@ -769,55 +776,43 @@ public final FieldNameContext fieldName() throws RecognitionException { } @SuppressWarnings("CheckReturnValue") - public static class RangeQueryValueContext extends ParserRuleContext { - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); - } - public QuotedStringExpressionContext quotedStringExpression() { - return getRuleContext(QuotedStringExpressionContext.class,0); + public static class ExistsQueryContext extends ParserRuleContext { + public FieldNameContext fieldName() { + return getRuleContext(FieldNameContext.class,0); } - public RangeQueryValueContext(ParserRuleContext parent, int invokingState) { + public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } + public TerminalNode WILDCARD() { return getToken(KqlBaseParser.WILDCARD, 0); } + public ExistsQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_rangeQueryValue; } + @Override public int getRuleIndex() { return RULE_existsQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterRangeQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterExistsQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitRangeQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitExistsQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitRangeQueryValue(this); + if ( visitor 
instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitExistsQuery(this); else return visitor.visitChildren(this); } } - public final RangeQueryValueContext rangeQueryValue() throws RecognitionException { - RangeQueryValueContext _localctx = new RangeQueryValueContext(_ctx, getState()); - enterRule(_localctx, 18, RULE_rangeQueryValue); + public final ExistsQueryContext existsQuery() throws RecognitionException { + ExistsQueryContext _localctx = new ExistsQueryContext(_ctx, getState()); + enterRule(_localctx, 16, RULE_existsQuery); try { - setState(86); - _errHandler.sync(this); - switch (_input.LA(1)) { - case UNQUOTED_LITERAL: - enterOuterAlt(_localctx, 1); - { - setState(84); - unquotedLiteralExpression(); - } - break; - case QUOTED_STRING: - enterOuterAlt(_localctx, 2); - { - setState(85); - quotedStringExpression(); - } - break; - default: - throw new NoViableAltException(this); + enterOuterAlt(_localctx, 1); + { + setState(82); + fieldName(); + setState(83); + match(COLON); + setState(84); + match(WILDCARD); } } catch (RecognitionException re) { @@ -832,76 +827,68 @@ public final RangeQueryValueContext rangeQueryValue() throws RecognitionExceptio } @SuppressWarnings("CheckReturnValue") - public static class TermQueryValueContext extends ParserRuleContext { - public UnquotedLiteralExpressionContext termValue; - public WildcardExpressionContext wildcardExpression() { - return getRuleContext(WildcardExpressionContext.class,0); - } - public QuotedStringExpressionContext quotedStringExpression() { - return getRuleContext(QuotedStringExpressionContext.class,0); - } - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); + public static class FieldQueryContext extends ParserRuleContext { + public FieldNameContext fieldName() { + return getRuleContext(FieldNameContext.class,0); } - public GroupingTermExpressionContext groupingTermExpression() { - return getRuleContext(GroupingTermExpressionContext.class,0); + public TerminalNode COLON() { return getToken(KqlBaseParser.COLON, 0); } + public FieldQueryValueContext fieldQueryValue() { + return getRuleContext(FieldQueryValueContext.class,0); } - public TermQueryValueContext(ParserRuleContext parent, int invokingState) { + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } + public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } + public FieldQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_termQueryValue; } + @Override public int getRuleIndex() { return RULE_fieldQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterTermQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitTermQueryValue(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitTermQueryValue(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldQuery(this); else return visitor.visitChildren(this); } } - 
public final TermQueryValueContext termQueryValue() throws RecognitionException { - TermQueryValueContext _localctx = new TermQueryValueContext(_ctx, getState()); - enterRule(_localctx, 20, RULE_termQueryValue); + public final FieldQueryContext fieldQuery() throws RecognitionException { + FieldQueryContext _localctx = new FieldQueryContext(_ctx, getState()); + enterRule(_localctx, 18, RULE_fieldQuery); try { - setState(92); + setState(96); _errHandler.sync(this); - switch (_input.LA(1)) { - case WILDCARD: + switch ( getInterpreter().adaptivePredict(_input,7,_ctx) ) { + case 1: enterOuterAlt(_localctx, 1); { + setState(86); + fieldName(); + setState(87); + match(COLON); setState(88); - wildcardExpression(); + fieldQueryValue(); } break; - case QUOTED_STRING: + case 2: enterOuterAlt(_localctx, 2); { - setState(89); - quotedStringExpression(); - } - break; - case UNQUOTED_LITERAL: - enterOuterAlt(_localctx, 3); - { setState(90); - ((TermQueryValueContext)_localctx).termValue = unquotedLiteralExpression(); - } - break; - case LEFT_PARENTHESIS: - enterOuterAlt(_localctx, 4); - { + fieldName(); setState(91); - groupingTermExpression(); + match(COLON); + setState(92); + match(LEFT_PARENTHESIS); + setState(93); + fieldQueryValue(); + setState(94); + match(RIGHT_PARENTHESIS); } break; - default: - throw new NoViableAltException(this); } } catch (RecognitionException re) { @@ -916,43 +903,63 @@ public final TermQueryValueContext termQueryValue() throws RecognitionException } @SuppressWarnings("CheckReturnValue") - public static class GroupingTermExpressionContext extends ParserRuleContext { - public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } - public UnquotedLiteralExpressionContext unquotedLiteralExpression() { - return getRuleContext(UnquotedLiteralExpressionContext.class,0); + public static class FieldLessQueryContext extends ParserRuleContext { + public FieldQueryValueContext fieldQueryValue() { + return getRuleContext(FieldQueryValueContext.class,0); } + public TerminalNode LEFT_PARENTHESIS() { return getToken(KqlBaseParser.LEFT_PARENTHESIS, 0); } public TerminalNode RIGHT_PARENTHESIS() { return getToken(KqlBaseParser.RIGHT_PARENTHESIS, 0); } - public GroupingTermExpressionContext(ParserRuleContext parent, int invokingState) { + public FieldLessQueryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_groupingTermExpression; } + @Override public int getRuleIndex() { return RULE_fieldLessQuery; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterGroupingTermExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldLessQuery(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitGroupingTermExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldLessQuery(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitGroupingTermExpression(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldLessQuery(this); else return visitor.visitChildren(this); } } - public final GroupingTermExpressionContext groupingTermExpression() throws RecognitionException { - GroupingTermExpressionContext 
_localctx = new GroupingTermExpressionContext(_ctx, getState()); - enterRule(_localctx, 22, RULE_groupingTermExpression); + public final FieldLessQueryContext fieldLessQuery() throws RecognitionException { + FieldLessQueryContext _localctx = new FieldLessQueryContext(_ctx, getState()); + enterRule(_localctx, 20, RULE_fieldLessQuery); try { - enterOuterAlt(_localctx, 1); - { - setState(94); - match(LEFT_PARENTHESIS); - setState(95); - unquotedLiteralExpression(); - setState(96); - match(RIGHT_PARENTHESIS); + setState(103); + _errHandler.sync(this); + switch (_input.LA(1)) { + case AND: + case OR: + case NOT: + case UNQUOTED_LITERAL: + case QUOTED_STRING: + case WILDCARD: + enterOuterAlt(_localctx, 1); + { + setState(98); + fieldQueryValue(); + } + break; + case LEFT_PARENTHESIS: + enterOuterAlt(_localctx, 2); + { + setState(99); + match(LEFT_PARENTHESIS); + setState(100); + fieldQueryValue(); + setState(101); + match(RIGHT_PARENTHESIS); + } + break; + default: + throw new NoViableAltException(this); } } catch (RecognitionException re) { @@ -967,57 +974,171 @@ public final GroupingTermExpressionContext groupingTermExpression() throws Recog } @SuppressWarnings("CheckReturnValue") - public static class UnquotedLiteralExpressionContext extends ParserRuleContext { + public static class FieldQueryValueContext extends ParserRuleContext { + public TerminalNode AND() { return getToken(KqlBaseParser.AND, 0); } + public TerminalNode OR() { return getToken(KqlBaseParser.OR, 0); } public List UNQUOTED_LITERAL() { return getTokens(KqlBaseParser.UNQUOTED_LITERAL); } public TerminalNode UNQUOTED_LITERAL(int i) { return getToken(KqlBaseParser.UNQUOTED_LITERAL, i); } - public UnquotedLiteralExpressionContext(ParserRuleContext parent, int invokingState) { + public List WILDCARD() { return getTokens(KqlBaseParser.WILDCARD); } + public TerminalNode WILDCARD(int i) { + return getToken(KqlBaseParser.WILDCARD, i); + } + public TerminalNode NOT() { return getToken(KqlBaseParser.NOT, 0); } + public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } + public FieldQueryValueContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_unquotedLiteralExpression; } + @Override public int getRuleIndex() { return RULE_fieldQueryValue; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterUnquotedLiteralExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldQueryValue(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitUnquotedLiteralExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldQueryValue(this); } @Override public T accept(ParseTreeVisitor visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitUnquotedLiteralExpression(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor)visitor).visitFieldQueryValue(this); else return visitor.visitChildren(this); } } - public final UnquotedLiteralExpressionContext unquotedLiteralExpression() throws RecognitionException { - UnquotedLiteralExpressionContext _localctx = new UnquotedLiteralExpressionContext(_ctx, getState()); - enterRule(_localctx, 24, RULE_unquotedLiteralExpression); + public final FieldQueryValueContext fieldQueryValue() throws 
RecognitionException { + FieldQueryValueContext _localctx = new FieldQueryValueContext(_ctx, getState()); + enterRule(_localctx, 22, RULE_fieldQueryValue); + int _la; try { int _alt; - enterOuterAlt(_localctx, 1); - { - setState(99); + setState(123); _errHandler.sync(this); - _alt = 1; - do { - switch (_alt) { - case 1: + switch ( getInterpreter().adaptivePredict(_input,13,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(106); + _errHandler.sync(this); + _la = _input.LA(1); + if (_la==AND || _la==OR) { { + setState(105); + _la = _input.LA(1); + if ( !(_la==AND || _la==OR) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + + setState(109); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(108); + _la = _input.LA(1); + if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(111); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,10,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(114); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(113); + _la = _input.LA(1); + if ( !(_la==UNQUOTED_LITERAL || _la==WILDCARD) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(116); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,11,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + setState(119); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { + case 1: { - setState(98); - match(UNQUOTED_LITERAL); + setState(118); + _la = _input.LA(1); + if ( !(_la==AND || _la==OR) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); } } break; - default: - throw new NoViableAltException(this); } - setState(101); - _errHandler.sync(this); - _alt = getInterpreter().adaptivePredict(_input,9,_ctx); - } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(121); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & 28L) != 0)) ) { + _errHandler.recoverInline(this); + } + else { + if ( _input.LA(1)==Token.EOF ) matchedEOF = true; + _errHandler.reportMatch(this); + consume(); + } + } + break; + case 4: + enterOuterAlt(_localctx, 4); + { + setState(122); + match(QUOTED_STRING); + } + break; } } catch (RecognitionException re) { @@ -1032,78 +1153,76 @@ public final UnquotedLiteralExpressionContext unquotedLiteralExpression() throws } @SuppressWarnings("CheckReturnValue") - public static class QuotedStringExpressionContext extends ParserRuleContext { - public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } - public QuotedStringExpressionContext(ParserRuleContext parent, int invokingState) { - super(parent, invokingState); - } - @Override public int 
getRuleIndex() { return RULE_quotedStringExpression; } - @Override - public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterQuotedStringExpression(this); - } - @Override - public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitQuotedStringExpression(this); - } - @Override - public <T> T accept(ParseTreeVisitor<? extends T> visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor<? extends T>)visitor).visitQuotedStringExpression(this); - else return visitor.visitChildren(this); - } - } - - public final QuotedStringExpressionContext quotedStringExpression() throws RecognitionException { - QuotedStringExpressionContext _localctx = new QuotedStringExpressionContext(_ctx, getState()); - enterRule(_localctx, 26, RULE_quotedStringExpression); - try { - enterOuterAlt(_localctx, 1); - { - setState(103); - match(QUOTED_STRING); - } - } - catch (RecognitionException re) { - _localctx.exception = re; - _errHandler.reportError(this, re); - _errHandler.recover(this, re); - } - finally { - exitRule(); + public static class FieldNameContext extends ParserRuleContext { + public Token value; + public List<TerminalNode> UNQUOTED_LITERAL() { return getTokens(KqlBaseParser.UNQUOTED_LITERAL); } + public TerminalNode UNQUOTED_LITERAL(int i) { + return getToken(KqlBaseParser.UNQUOTED_LITERAL, i); } - return _localctx; - } - - @SuppressWarnings("CheckReturnValue") - public static class WildcardExpressionContext extends ParserRuleContext { + public TerminalNode QUOTED_STRING() { return getToken(KqlBaseParser.QUOTED_STRING, 0); } public TerminalNode WILDCARD() { return getToken(KqlBaseParser.WILDCARD, 0); } - public WildcardExpressionContext(ParserRuleContext parent, int invokingState) { + public FieldNameContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); } - @Override public int getRuleIndex() { return RULE_wildcardExpression; } + @Override public int getRuleIndex() { return RULE_fieldName; } @Override public void enterRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterWildcardExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).enterFieldName(this); } @Override public void exitRule(ParseTreeListener listener) { - if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitWildcardExpression(this); + if ( listener instanceof KqlBaseListener ) ((KqlBaseListener)listener).exitFieldName(this); } @Override public <T> T accept(ParseTreeVisitor<? extends T> visitor) { - if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor<? extends T>)visitor).visitWildcardExpression(this); + if ( visitor instanceof KqlBaseVisitor ) return ((KqlBaseVisitor<? extends T>)visitor).visitFieldName(this); else return visitor.visitChildren(this); } } - public final WildcardExpressionContext wildcardExpression() throws RecognitionException { - WildcardExpressionContext _localctx = new WildcardExpressionContext(_ctx, getState()); - enterRule(_localctx, 28, RULE_wildcardExpression); + public final FieldNameContext fieldName() throws RecognitionException { + FieldNameContext _localctx = new FieldNameContext(_ctx, getState()); + enterRule(_localctx, 24, RULE_fieldName); + int _la; try { - enterOuterAlt(_localctx, 1); - { - setState(105); - match(WILDCARD); + setState(132); + _errHandler.sync(this); + switch (_input.LA(1)) { + case UNQUOTED_LITERAL: + enterOuterAlt(_localctx, 1); + { + setState(126); +
_errHandler.sync(this); + _la = _input.LA(1); + do { + { + { + setState(125); + ((FieldNameContext)_localctx).value = match(UNQUOTED_LITERAL); + } + } + setState(128); + _errHandler.sync(this); + _la = _input.LA(1); + } while ( _la==UNQUOTED_LITERAL ); + } + break; + case QUOTED_STRING: + enterOuterAlt(_localctx, 2); + { + setState(130); + ((FieldNameContext)_localctx).value = match(QUOTED_STRING); + } + break; + case WILDCARD: + enterOuterAlt(_localctx, 3); + { + setState(131); + ((FieldNameContext)_localctx).value = match(WILDCARD); + } + break; + default: + throw new NoViableAltException(this); } } catch (RecognitionException re) { @@ -1133,65 +1252,86 @@ private boolean query_sempred(QueryContext _localctx, int predIndex) { } public static final String _serializedATN = - "\u0004\u0001\rl\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001\u0002"+ - "\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004\u0002"+ - "\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007\u0002"+ - "\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b\u0002"+ - "\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0001\u0000\u0003\u0000"+ - " \b\u0000\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0001\u0001\u0003\u0001(\b\u0001\u0001\u0001\u0001\u0001\u0001\u0001"+ - "\u0005\u0001-\b\u0001\n\u0001\f\u00010\t\u0001\u0001\u0002\u0001\u0002"+ - "\u0001\u0002\u0003\u00025\b\u0002\u0001\u0003\u0001\u0003\u0003\u0003"+ - "9\b\u0003\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004"+ - "\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0006"+ - "\u0001\u0006\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001\u0007"+ - "\u0003\u0007L\b\u0007\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\b"+ - "\u0003\bS\b\b\u0001\t\u0001\t\u0003\tW\b\t\u0001\n\u0001\n\u0001\n\u0001"+ - "\n\u0003\n]\b\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0001"+ - "\f\u0004\fd\b\f\u000b\f\f\fe\u0001\r\u0001\r\u0001\u000e\u0001\u000e\u0001"+ - "\u000e\u0000\u0001\u0002\u000f\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010"+ - "\u0012\u0014\u0016\u0018\u001a\u001c\u0000\u0001\u0001\u0000\u0002\u0003"+ - "j\u0000\u001f\u0001\u0000\u0000\u0000\u0002\'\u0001\u0000\u0000\u0000"+ - "\u00044\u0001\u0000\u0000\u0000\u00068\u0001\u0000\u0000\u0000\b:\u0001"+ - "\u0000\u0000\u0000\n@\u0001\u0000\u0000\u0000\fD\u0001\u0000\u0000\u0000"+ - "\u000eK\u0001\u0000\u0000\u0000\u0010R\u0001\u0000\u0000\u0000\u0012V"+ - "\u0001\u0000\u0000\u0000\u0014\\\u0001\u0000\u0000\u0000\u0016^\u0001"+ - "\u0000\u0000\u0000\u0018c\u0001\u0000\u0000\u0000\u001ag\u0001\u0000\u0000"+ - "\u0000\u001ci\u0001\u0000\u0000\u0000\u001e \u0003\u0002\u0001\u0000\u001f"+ - "\u001e\u0001\u0000\u0000\u0000\u001f \u0001\u0000\u0000\u0000 !\u0001"+ - "\u0000\u0000\u0000!\"\u0005\u0000\u0000\u0001\"\u0001\u0001\u0000\u0000"+ - "\u0000#$\u0006\u0001\uffff\uffff\u0000$%\u0005\u0004\u0000\u0000%(\u0003"+ - "\u0004\u0002\u0000&(\u0003\u0004\u0002\u0000\'#\u0001\u0000\u0000\u0000"+ - "\'&\u0001\u0000\u0000\u0000(.\u0001\u0000\u0000\u0000)*\n\u0003\u0000"+ - "\u0000*+\u0007\u0000\u0000\u0000+-\u0003\u0002\u0001\u0004,)\u0001\u0000"+ - "\u0000\u0000-0\u0001\u0000\u0000\u0000.,\u0001\u0000\u0000\u0000./\u0001"+ - "\u0000\u0000\u0000/\u0003\u0001\u0000\u0000\u00000.\u0001\u0000\u0000"+ - "\u000015\u0003\b\u0004\u000025\u0003\u0006\u0003\u000035\u0003\n\u0005"+ - "\u000041\u0001\u0000\u0000\u000042\u0001\u0000\u0000\u000043\u0001\u0000"+ - "\u0000\u00005\u0005\u0001\u0000\u0000\u000069\u0003\u000e\u0007\u0000"+ - 
"79\u0003\f\u0006\u000086\u0001\u0000\u0000\u000087\u0001\u0000\u0000\u0000"+ - "9\u0007\u0001\u0000\u0000\u0000:;\u0003\u0010\b\u0000;<\u0005\u0005\u0000"+ - "\u0000<=\u0005\t\u0000\u0000=>\u0003\u0002\u0001\u0000>?\u0005\n\u0000"+ - "\u0000?\t\u0001\u0000\u0000\u0000@A\u0005\u0007\u0000\u0000AB\u0003\u0002"+ - "\u0001\u0000BC\u0005\b\u0000\u0000C\u000b\u0001\u0000\u0000\u0000DE\u0003"+ - "\u0010\b\u0000EF\u0005\u0006\u0000\u0000FG\u0003\u0012\t\u0000G\r\u0001"+ - "\u0000\u0000\u0000HI\u0003\u0010\b\u0000IJ\u0005\u0005\u0000\u0000JL\u0001"+ - "\u0000\u0000\u0000KH\u0001\u0000\u0000\u0000KL\u0001\u0000\u0000\u0000"+ - "LM\u0001\u0000\u0000\u0000MN\u0003\u0014\n\u0000N\u000f\u0001\u0000\u0000"+ - "\u0000OS\u0003\u001c\u000e\u0000PS\u0003\u0018\f\u0000QS\u0003\u001a\r"+ - "\u0000RO\u0001\u0000\u0000\u0000RP\u0001\u0000\u0000\u0000RQ\u0001\u0000"+ - "\u0000\u0000S\u0011\u0001\u0000\u0000\u0000TW\u0003\u0018\f\u0000UW\u0003"+ - "\u001a\r\u0000VT\u0001\u0000\u0000\u0000VU\u0001\u0000\u0000\u0000W\u0013"+ - "\u0001\u0000\u0000\u0000X]\u0003\u001c\u000e\u0000Y]\u0003\u001a\r\u0000"+ - "Z]\u0003\u0018\f\u0000[]\u0003\u0016\u000b\u0000\\X\u0001\u0000\u0000"+ - "\u0000\\Y\u0001\u0000\u0000\u0000\\Z\u0001\u0000\u0000\u0000\\[\u0001"+ - "\u0000\u0000\u0000]\u0015\u0001\u0000\u0000\u0000^_\u0005\u0007\u0000"+ - "\u0000_`\u0003\u0018\f\u0000`a\u0005\b\u0000\u0000a\u0017\u0001\u0000"+ - "\u0000\u0000bd\u0005\u000b\u0000\u0000cb\u0001\u0000\u0000\u0000de\u0001"+ - "\u0000\u0000\u0000ec\u0001\u0000\u0000\u0000ef\u0001\u0000\u0000\u0000"+ - "f\u0019\u0001\u0000\u0000\u0000gh\u0005\f\u0000\u0000h\u001b\u0001\u0000"+ - "\u0000\u0000ij\u0005\r\u0000\u0000j\u001d\u0001\u0000\u0000\u0000\n\u001f"+ - "\'.48KRV\\e"; + "\u0004\u0001\u0010\u0087\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001"+ + "\u0002\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004"+ + "\u0002\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007"+ + "\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b"+ + "\u0002\f\u0007\f\u0001\u0000\u0003\u0000\u001c\b\u0000\u0001\u0000\u0001"+ + "\u0000\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0003\u0001$\b"+ + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001)\b\u0001\n\u0001"+ + "\f\u0001,\t\u0001\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0001"+ + "\u0002\u0001\u0002\u0001\u0002\u0003\u00025\b\u0002\u0001\u0003\u0001"+ + "\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0004\u0001"+ + "\u0004\u0003\u0004?\b\u0004\u0001\u0004\u0001\u0004\u0001\u0005\u0001"+ + "\u0005\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0006\u0001"+ + "\u0006\u0001\u0007\u0004\u0007L\b\u0007\u000b\u0007\f\u0007M\u0001\u0007"+ + "\u0003\u0007Q\b\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0001\t\u0001\t"+ + "\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0003"+ + "\ta\b\t\u0001\n\u0001\n\u0001\n\u0001\n\u0001\n\u0003\nh\b\n\u0001\u000b"+ + "\u0003\u000bk\b\u000b\u0001\u000b\u0004\u000bn\b\u000b\u000b\u000b\f\u000b"+ + "o\u0001\u000b\u0004\u000bs\b\u000b\u000b\u000b\f\u000bt\u0001\u000b\u0003"+ + "\u000bx\b\u000b\u0001\u000b\u0001\u000b\u0003\u000b|\b\u000b\u0001\f\u0004"+ + "\f\u007f\b\f\u000b\f\f\f\u0080\u0001\f\u0001\f\u0003\f\u0085\b\f\u0001"+ + "\f\u0000\u0001\u0002\r\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010\u0012"+ + "\u0014\u0016\u0018\u0000\u0004\u0001\u0000\u0002\u0003\u0001\u0000\u0006"+ + "\t\u0002\u0000\u000e\u000e\u0010\u0010\u0001\u0000\u0002\u0004\u0091\u0000"+ + 
"\u001b\u0001\u0000\u0000\u0000\u0002#\u0001\u0000\u0000\u0000\u00044\u0001"+ + "\u0000\u0000\u0000\u00066\u0001\u0000\u0000\u0000\b>\u0001\u0000\u0000"+ + "\u0000\nB\u0001\u0000\u0000\u0000\fF\u0001\u0000\u0000\u0000\u000eP\u0001"+ + "\u0000\u0000\u0000\u0010R\u0001\u0000\u0000\u0000\u0012`\u0001\u0000\u0000"+ + "\u0000\u0014g\u0001\u0000\u0000\u0000\u0016{\u0001\u0000\u0000\u0000\u0018"+ + "\u0084\u0001\u0000\u0000\u0000\u001a\u001c\u0003\u0002\u0001\u0000\u001b"+ + "\u001a\u0001\u0000\u0000\u0000\u001b\u001c\u0001\u0000\u0000\u0000\u001c"+ + "\u001d\u0001\u0000\u0000\u0000\u001d\u001e\u0005\u0000\u0000\u0001\u001e"+ + "\u0001\u0001\u0000\u0000\u0000\u001f \u0006\u0001\uffff\uffff\u0000 !"+ + "\u0005\u0004\u0000\u0000!$\u0003\u0004\u0002\u0000\"$\u0003\u0004\u0002"+ + "\u0000#\u001f\u0001\u0000\u0000\u0000#\"\u0001\u0000\u0000\u0000$*\u0001"+ + "\u0000\u0000\u0000%&\n\u0003\u0000\u0000&\'\u0007\u0000\u0000\u0000\'"+ + ")\u0003\u0002\u0001\u0003(%\u0001\u0000\u0000\u0000),\u0001\u0000\u0000"+ + "\u0000*(\u0001\u0000\u0000\u0000*+\u0001\u0000\u0000\u0000+\u0003\u0001"+ + "\u0000\u0000\u0000,*\u0001\u0000\u0000\u0000-5\u0003\u0006\u0003\u0000"+ + ".5\u0003\n\u0005\u0000/5\u0003\b\u0004\u000005\u0003\u0010\b\u000015\u0003"+ + "\f\u0006\u000025\u0003\u0012\t\u000035\u0003\u0014\n\u00004-\u0001\u0000"+ + "\u0000\u00004.\u0001\u0000\u0000\u00004/\u0001\u0000\u0000\u000040\u0001"+ + "\u0000\u0000\u000041\u0001\u0000\u0000\u000042\u0001\u0000\u0000\u0000"+ + "43\u0001\u0000\u0000\u00005\u0005\u0001\u0000\u0000\u000067\u0003\u0018"+ + "\f\u000078\u0005\u0005\u0000\u000089\u0005\f\u0000\u00009:\u0003\u0002"+ + "\u0001\u0000:;\u0005\r\u0000\u0000;\u0007\u0001\u0000\u0000\u0000<=\u0005"+ + "\u0010\u0000\u0000=?\u0005\u0005\u0000\u0000><\u0001\u0000\u0000\u0000"+ + ">?\u0001\u0000\u0000\u0000?@\u0001\u0000\u0000\u0000@A\u0005\u0010\u0000"+ + "\u0000A\t\u0001\u0000\u0000\u0000BC\u0005\n\u0000\u0000CD\u0003\u0002"+ + "\u0001\u0000DE\u0005\u000b\u0000\u0000E\u000b\u0001\u0000\u0000\u0000"+ + "FG\u0003\u0018\f\u0000GH\u0007\u0001\u0000\u0000HI\u0003\u000e\u0007\u0000"+ + "I\r\u0001\u0000\u0000\u0000JL\u0007\u0002\u0000\u0000KJ\u0001\u0000\u0000"+ + "\u0000LM\u0001\u0000\u0000\u0000MK\u0001\u0000\u0000\u0000MN\u0001\u0000"+ + "\u0000\u0000NQ\u0001\u0000\u0000\u0000OQ\u0005\u000f\u0000\u0000PK\u0001"+ + "\u0000\u0000\u0000PO\u0001\u0000\u0000\u0000Q\u000f\u0001\u0000\u0000"+ + "\u0000RS\u0003\u0018\f\u0000ST\u0005\u0005\u0000\u0000TU\u0005\u0010\u0000"+ + "\u0000U\u0011\u0001\u0000\u0000\u0000VW\u0003\u0018\f\u0000WX\u0005\u0005"+ + "\u0000\u0000XY\u0003\u0016\u000b\u0000Ya\u0001\u0000\u0000\u0000Z[\u0003"+ + "\u0018\f\u0000[\\\u0005\u0005\u0000\u0000\\]\u0005\n\u0000\u0000]^\u0003"+ + "\u0016\u000b\u0000^_\u0005\u000b\u0000\u0000_a\u0001\u0000\u0000\u0000"+ + "`V\u0001\u0000\u0000\u0000`Z\u0001\u0000\u0000\u0000a\u0013\u0001\u0000"+ + "\u0000\u0000bh\u0003\u0016\u000b\u0000cd\u0005\n\u0000\u0000de\u0003\u0016"+ + "\u000b\u0000ef\u0005\u000b\u0000\u0000fh\u0001\u0000\u0000\u0000gb\u0001"+ + "\u0000\u0000\u0000gc\u0001\u0000\u0000\u0000h\u0015\u0001\u0000\u0000"+ + "\u0000ik\u0007\u0000\u0000\u0000ji\u0001\u0000\u0000\u0000jk\u0001\u0000"+ + "\u0000\u0000km\u0001\u0000\u0000\u0000ln\u0007\u0002\u0000\u0000ml\u0001"+ + "\u0000\u0000\u0000no\u0001\u0000\u0000\u0000om\u0001\u0000\u0000\u0000"+ + "op\u0001\u0000\u0000\u0000p|\u0001\u0000\u0000\u0000qs\u0007\u0002\u0000"+ + "\u0000rq\u0001\u0000\u0000\u0000st\u0001\u0000\u0000\u0000tr\u0001\u0000"+ + 
"\u0000\u0000tu\u0001\u0000\u0000\u0000uw\u0001\u0000\u0000\u0000vx\u0007"+ + "\u0000\u0000\u0000wv\u0001\u0000\u0000\u0000wx\u0001\u0000\u0000\u0000"+ + "x|\u0001\u0000\u0000\u0000y|\u0007\u0003\u0000\u0000z|\u0005\u000f\u0000"+ + "\u0000{j\u0001\u0000\u0000\u0000{r\u0001\u0000\u0000\u0000{y\u0001\u0000"+ + "\u0000\u0000{z\u0001\u0000\u0000\u0000|\u0017\u0001\u0000\u0000\u0000"+ + "}\u007f\u0005\u000e\u0000\u0000~}\u0001\u0000\u0000\u0000\u007f\u0080"+ + "\u0001\u0000\u0000\u0000\u0080~\u0001\u0000\u0000\u0000\u0080\u0081\u0001"+ + "\u0000\u0000\u0000\u0081\u0085\u0001\u0000\u0000\u0000\u0082\u0085\u0005"+ + "\u000f\u0000\u0000\u0083\u0085\u0005\u0010\u0000\u0000\u0084~\u0001\u0000"+ + "\u0000\u0000\u0084\u0082\u0001\u0000\u0000\u0000\u0084\u0083\u0001\u0000"+ + "\u0000\u0000\u0085\u0019\u0001\u0000\u0000\u0000\u0010\u001b#*4>MP`gj"+ + "otw{\u0080\u0084"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { diff --git a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java index 55fa21f0e899d..67253e4364190 100644 --- a/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java +++ b/x-pack/plugin/kql/src/main/java/org/elasticsearch/xpack/kql/parser/KqlBaseVisitor.java @@ -51,12 +51,6 @@ interface KqlBaseVisitor extends ParseTreeVisitor { * @return the visitor result */ T visitSimpleQuery(KqlBaseParser.SimpleQueryContext ctx); - /** - * Visit a parse tree produced by {@link KqlBaseParser#expression}. - * @param ctx the parse tree - * @return the visitor result - */ - T visitExpression(KqlBaseParser.ExpressionContext ctx); /** * Visit a parse tree produced by {@link KqlBaseParser#nestedQuery}. * @param ctx the parse tree @@ -64,29 +58,23 @@ interface KqlBaseVisitor extends ParseTreeVisitor { */ T visitNestedQuery(KqlBaseParser.NestedQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. - * @param ctx the parse tree - * @return the visitor result - */ - T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); - /** - * Visit a parse tree produced by {@link KqlBaseParser#fieldRangeQuery}. + * Visit a parse tree produced by {@link KqlBaseParser#matchAllQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitFieldRangeQuery(KqlBaseParser.FieldRangeQueryContext ctx); + T visitMatchAllQuery(KqlBaseParser.MatchAllQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#fieldTermQuery}. + * Visit a parse tree produced by {@link KqlBaseParser#parenthesizedQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitFieldTermQuery(KqlBaseParser.FieldTermQueryContext ctx); + T visitParenthesizedQuery(KqlBaseParser.ParenthesizedQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#fieldName}. + * Visit a parse tree produced by {@link KqlBaseParser#rangeQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitFieldName(KqlBaseParser.FieldNameContext ctx); + T visitRangeQuery(KqlBaseParser.RangeQueryContext ctx); /** * Visit a parse tree produced by {@link KqlBaseParser#rangeQueryValue}. * @param ctx the parse tree @@ -94,33 +82,33 @@ interface KqlBaseVisitor extends ParseTreeVisitor { */ T visitRangeQueryValue(KqlBaseParser.RangeQueryValueContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#termQueryValue}. 
+ * Visit a parse tree produced by {@link KqlBaseParser#existsQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitTermQueryValue(KqlBaseParser.TermQueryValueContext ctx); + T visitExistsQuery(KqlBaseParser.ExistsQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#groupingTermExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitGroupingTermExpression(KqlBaseParser.GroupingTermExpressionContext ctx); + T visitFieldQuery(KqlBaseParser.FieldQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#unquotedLiteralExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldLessQuery}. * @param ctx the parse tree * @return the visitor result */ - T visitUnquotedLiteralExpression(KqlBaseParser.UnquotedLiteralExpressionContext ctx); + T visitFieldLessQuery(KqlBaseParser.FieldLessQueryContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#quotedStringExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldQueryValue}. * @param ctx the parse tree * @return the visitor result */ - T visitQuotedStringExpression(KqlBaseParser.QuotedStringExpressionContext ctx); + T visitFieldQueryValue(KqlBaseParser.FieldQueryValueContext ctx); /** - * Visit a parse tree produced by {@link KqlBaseParser#wildcardExpression}. + * Visit a parse tree produced by {@link KqlBaseParser#fieldName}. * @param ctx the parse tree * @return the visitor result */ - T visitWildcardExpression(KqlBaseParser.WildcardExpressionContext ctx); + T visitFieldName(KqlBaseParser.FieldNameContext ctx); } diff --git a/x-pack/plugin/kql/src/test/resources/supported-queries b/x-pack/plugin/kql/src/test/resources/supported-queries index d750f16149112..d9378cf9041c2 100644 --- a/x-pack/plugin/kql/src/test/resources/supported-queries +++ b/x-pack/plugin/kql/src/test/resources/supported-queries @@ -68,6 +68,15 @@ foo_field:foo AND (foo_field:foo bar OR foo bar) foo_field:foo AND (foo_field:foo bar OR foo bar) foo_field:foo OR (foo_field:foo bar OR foo bar) +foo:AND +foo:OR +foo:NOT +foo AND +foo OR +AND foo +OR foo +NOT + // Nested queries nested_field: { NOT foo } nested_field: { NOT foo bar } diff --git a/x-pack/plugin/kql/src/test/resources/unsupported-queries b/x-pack/plugin/kql/src/test/resources/unsupported-queries index 545b03576b331..97a26f16db141 100644 --- a/x-pack/plugin/kql/src/test/resources/unsupported-queries +++ b/x-pack/plugin/kql/src/test/resources/unsupported-queries @@ -16,14 +16,6 @@ NOT (foo_field:foo AND) foo_field:foo bar foo_field: "foo bar foo_field: foo bar" - -// Invalid boolean queries -foo AND -AND foo -foo OR -OR foo -NOT foo: - // Can't nest grouping terms parentheses foo_field:(foo (bar)) From 999274c003cc165c2634cbf94b9bad354239e22d Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 25 Oct 2024 12:43:28 +0200 Subject: [PATCH 58/60] Cleanup HotThreadsIT (example of test cleanup) (#115601) Just a quick example of how to save quite a few lines of code and make a test easier to reason about. 
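To make the pattern concrete outside the test framework: the change swaps hand-rolled CountDownLatch/AtomicBoolean bookkeeping around an async callback for a future that carries both the result and the first failure. Below is a minimal JDK-only sketch of the same idea — CompletableFuture stands in for the ActionFuture returned by client().execute(), and executeAsync is an invented stand-in for a callback-style API, not an Elasticsearch method:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class LatchVsFutureSketch {

        // Invented stand-in for a callback-style async API such as
        // client().execute(type, request, listener).
        static void executeAsync(Runnable callback) {
            new Thread(callback).start();
        }

        public static void main(String[] args) throws Exception {
            // Before: every callback path must update the latch and the error flag.
            CountDownLatch latch = new CountDownLatch(1);
            AtomicBoolean hasErrors = new AtomicBoolean(false);
            executeAsync(() -> {
                try {
                    // response assertions would run here
                } catch (Throwable t) {
                    hasErrors.set(true);
                } finally {
                    latch.countDown();
                }
            });
            latch.await();
            if (hasErrors.get()) {
                throw new AssertionError("async assertions failed");
            }

            // After: the future records completion and failure in one object,
            // so the latch/flag plumbing disappears.
            CompletableFuture<String> future = CompletableFuture.supplyAsync(() -> "hot threads report");
            while (future.isDone() == false) {
                Thread.onSpinWait(); // the real test keeps the cluster busy here instead
            }
            String response = future.get(); // rethrows any async failure
            if (response.isEmpty()) {
                throw new AssertionError("unexpectedly empty response");
            }
        }
    }

In both halves the assertions run exactly once; the difference is that the future version surfaces an async failure as an exception from get(), so no flag or latch has to be threaded through every callback path.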
--- .../action/admin/HotThreadsIT.java | 54 ++++++------------- 1 file changed, 15 insertions(+), 39 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 20c10c3d8c1f9..8c80cee58f46c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.Level; import org.apache.lucene.util.Constants; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; @@ -26,15 +26,14 @@ import org.hamcrest.Matcher; import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -44,11 +43,10 @@ public class HotThreadsIT extends ESIntegTestCase { - public void testHotThreadsDontFail() throws InterruptedException { + public void testHotThreadsDontFail() throws InterruptedException, ExecutionException { // This test just checks if nothing crashes or gets stuck etc. 
createIndex("test"); final int iters = scaledRandomIntBetween(2, 20); - final AtomicBoolean hasErrors = new AtomicBoolean(false); for (int i = 0; i < iters; i++) { final NodesHotThreadsRequest request = new NodesHotThreadsRequest( Strings.EMPTY_ARRAY, @@ -67,36 +65,7 @@ public void testHotThreadsDontFail() throws InterruptedException { randomBoolean() ) ); - final CountDownLatch latch = new CountDownLatch(1); - client().execute(TransportNodesHotThreadsAction.TYPE, request, new ActionListener<>() { - @Override - public void onResponse(NodesHotThreadsResponse nodeHotThreads) { - boolean success = false; - try { - assertThat(nodeHotThreads, notNullValue()); - Map<String, NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap(); - assertThat(nodeHotThreads.failures(), empty()); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (NodeHotThreads ht : nodeHotThreads.getNodes()) { - assertNotNull(ht.getHotThreads()); - } - success = true; - } finally { - if (success == false) { - hasErrors.set(true); - } - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - logger.error("FAILED", e); - hasErrors.set(true); - latch.countDown(); - fail(); - } - }); + final ActionFuture<NodesHotThreadsResponse> hotThreadsFuture = client().execute(TransportNodesHotThreadsAction.TYPE, request); indexRandom( true, @@ -105,7 +74,7 @@ public void onFailure(Exception e) { prepareIndex("test").setId("3").setSource("field1", "value3") ); ensureSearchable(); - while (latch.getCount() > 0) { + while (hotThreadsFuture.isDone() == false) { assertHitCount( prepareSearch().setQuery(matchAllQuery()) .setPostFilter( @@ -115,8 +84,15 @@ public void onFailure(Exception e) { 3L ); } - safeAwait(latch); - assertThat(hasErrors.get(), is(false)); + assertResponse(hotThreadsFuture, nodeHotThreads -> { + assertThat(nodeHotThreads, notNullValue()); + Map<String, NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap(); + assertThat(nodeHotThreads.failures(), empty()); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (NodeHotThreads ht : nodeHotThreads.getNodes()) { + assertNotNull(ht.getHotThreads()); + } + }); } } From e3523c159106255a96b8c00339f6c565b69c266a Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Fri, 25 Oct 2024 13:01:41 +0200 Subject: [PATCH 59/60] [DOCS] Fix link syntax in connectors-API-tutorial.asciidoc (#115635) --- docs/reference/connector/docs/connectors-API-tutorial.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/connector/docs/connectors-API-tutorial.asciidoc b/docs/reference/connector/docs/connectors-API-tutorial.asciidoc index 5275f82de1b1f..4118c564e4759 100644 --- a/docs/reference/connector/docs/connectors-API-tutorial.asciidoc +++ b/docs/reference/connector/docs/connectors-API-tutorial.asciidoc @@ -367,7 +367,7 @@ Refer to the individual <<connectors-references,connector references>> for these con ==== We're using a self-managed connector in this tutorial. To use these APIs with an Elastic managed connector, there's some extra setup for API keys. -Refer to native-connectors-manage-API-keys for details. +Refer to <<native-connectors-manage-API-keys>> for details. ==== We're now ready to sync our PostgreSQL data to {es}.
From 6e0bdbec0ade4af2b5d130aee6bf9e76a64f0e19 Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Fri, 25 Oct 2024 13:38:35 +0200 Subject: [PATCH 60/60] Fixed flaky test after PR that disallows functions to return TEXT (#115633) * Fixed flaky test after PR that disallows functions to return TEXT * Also ignore TEXT/KEYWORD combinations because they are now valid * Unmute the test --- muted-tests.yml | 3 --- .../elasticsearch/xpack/esql/analysis/AnalyzerTests.java | 8 +++++++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 4869b669f6220..5c94c0aff60b6 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -282,9 +282,6 @@ tests: - class: org.elasticsearch.oldrepos.OldRepositoryAccessIT method: testOldRepoAccess issue: https://github.com/elastic/elasticsearch/issues/115631 -- class: org.elasticsearch.xpack.esql.analysis.AnalyzerTests - method: testMvAppendValidation - issue: https://github.com/elastic/elasticsearch/issues/115636 # Examples: # diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index c18f55a651408..b86935dcd03da 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -56,6 +56,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -1879,6 +1880,11 @@ public void testMvAppendValidation() { Supplier<Integer> supplier = () -> randomInt(fields.length - 1); int first = supplier.get(); int second = randomValueOtherThan(first, supplier); + Function<String, String> noText = (type) -> type.equals("text") ? "keyword" : type; + assumeTrue( + "Ignore tests with TEXT and KEYWORD combinations because they are now valid", + noText.apply(fields[first][0]).equals(noText.apply(fields[second][0])) == false + ); String signature = "mv_append(" + fields[first][0] + ", " + fields[second][0] + ")"; verifyUnsupported( signature, "second argument of [" + signature + "] must be [" - + fields[first][1] + + noText.apply(fields[first][1]) + "], found value [" + fields[second][0] + "] type ["