diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 46da86dd4d1f..c5414fa99433 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -60,6 +60,7 @@ BWC_VERSION: - "7.17.9" - "7.17.10" - "7.17.11" + - "7.17.12" - "8.0.0" - "8.0.1" - "8.1.0" @@ -90,5 +91,6 @@ BWC_VERSION: - "8.8.0" - "8.8.1" - "8.8.2" + - "8.8.3" - "8.9.0" - "8.10.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index f6efee522355..a59023ae923d 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "7.17.11" - - "8.8.2" + - "7.17.12" + - "8.8.3" - "8.9.0" - "8.10.0" diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java index 396ffe1b5fe8..ed1ce9076de5 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java @@ -148,7 +148,7 @@ public TopDocs benchmark() throws IOException { private Query scriptScoreQuery(ScoreScript.Factory factory) { ScoreScript.LeafFactory leafFactory = factory.newFactory(Map.of(), lookup); - return new ScriptScoreQuery(new MatchAllDocsQuery(), null, leafFactory, lookup, null, "test", 0, IndexVersion.CURRENT); + return new ScriptScoreQuery(new MatchAllDocsQuery(), null, leafFactory, lookup, null, "test", 0, IndexVersion.current()); } private ScoreScript.Factory bareMetalScript() { diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/DistanceFunctionBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/DistanceFunctionBenchmark.java index 11180f503e30..fe6ba4da29f3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/DistanceFunctionBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/DistanceFunctionBenchmark.java @@ -242,7 +242,7 @@ private 
DotBinaryFloatBenchmarkFunction(int dims) { @Override public void execute(Consumer consumer) { - new BinaryDenseVector(docFloatVector, docVector, dims, IndexVersion.CURRENT).dotProduct(queryVector); + new BinaryDenseVector(docFloatVector, docVector, dims, IndexVersion.current()).dotProduct(queryVector); } } @@ -290,7 +290,7 @@ private CosineBinaryFloatBenchmarkFunction(int dims) { @Override public void execute(Consumer consumer) { - new BinaryDenseVector(docFloatVector, docVector, dims, IndexVersion.CURRENT).cosineSimilarity(queryVector, false); + new BinaryDenseVector(docFloatVector, docVector, dims, IndexVersion.current()).cosineSimilarity(queryVector, false); } } @@ -338,7 +338,7 @@ private L1BinaryFloatBenchmarkFunction(int dims) { @Override public void execute(Consumer consumer) { - new BinaryDenseVector(docFloatVector, docVector, dims, IndexVersion.CURRENT).l1Norm(queryVector); + new BinaryDenseVector(docFloatVector, docVector, dims, IndexVersion.current()).l1Norm(queryVector); } } @@ -386,7 +386,7 @@ private L2BinaryFloatBenchmarkFunction(int dims) { @Override public void execute(Consumer consumer) { - new BinaryDenseVector(docFloatVector, docVector, dims, IndexVersion.CURRENT).l1Norm(queryVector); + new BinaryDenseVector(docFloatVector, docVector, dims, IndexVersion.current()).l1Norm(queryVector); } } diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index 64f89d6c57ed..a8f380195e16 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -150,3 +150,6 @@ org.elasticsearch.cluster.service.ClusterService#submitUnbatchedStateUpdateTask( @defaultMessage Reacting to the published cluster state is an obstruction to batching cluster state tasks which leads to performance and stability bugs. 
Use the variants that accept a Runnable instead. org.elasticsearch.cluster.ClusterStateTaskExecutor$TaskContext#success(java.util.function.Consumer) org.elasticsearch.cluster.ClusterStateTaskExecutor$TaskContext#success(java.util.function.Consumer, org.elasticsearch.cluster.ClusterStateAckListener) + +@defaultMessage ClusterState#transportVersions are for internal use only. Use ClusterState#getMinTransportVersion or a different version. See TransportVersion javadocs for more info. +org.elasticsearch.cluster.ClusterState#transportVersions() diff --git a/docs/changelog/93545.yaml b/docs/changelog/93545.yaml new file mode 100644 index 000000000000..4367e44024e5 --- /dev/null +++ b/docs/changelog/93545.yaml @@ -0,0 +1,5 @@ +pr: 93545 +summary: Improve error message when aggregation doesn't support counter field +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/96161.yaml b/docs/changelog/96161.yaml index a255a4e71083..66368c403a94 100644 --- a/docs/changelog/96161.yaml +++ b/docs/changelog/96161.yaml @@ -4,3 +4,28 @@ area: "Search" type: enhancement issues: - 95541 +highlight: + title: Better indexing and search performance under concurrent indexing and search + body: "When a query like a match phrase query or a terms query targets a constant keyword field we can skip\ + query execution on shards where the query is rewritten to match no documents. We take advantage of index mappings\ + including constant keyword fields and rewrite queries in such a way that, if a constant keyword field does not\ + match the value defined in the index mapping, we rewrite the query to match no document. This will result in the\ + shard level request to return immediately, before the query is executed on the data node and, as a result, skipping\ + the shard completely. Here we leverage the ability to skip shards whenever possible to avoid unnecessary shard\ + refreshes and improve query latency (by not doing any search-related I/O). 
Avoiding such unnecessary shard refreshes\ improves query latency since the search thread does not need to wait anymore for unnecessary shard refreshes. Shards\ not matching the query criteria will remain in a search-idle state and indexing throughput will not be negatively\ affected by a refresh. Before introducing this change a query hitting multiple shards, including those with no\ documents matching the search criteria (think about using index patterns or data streams with many backing indices),\ would potentially result in a \"shard refresh storm\" increasing query latency as a result of the search thread\ waiting on all shard refreshes to complete before being able to initiate and carry out the search operation.\ After introducing this change the search thread will just need to wait for refreshes to be completed on shards\ including relevant data. Note that execution of the shard pre-filter and the corresponding \"can match\" phase where\ rewriting happens, depends on the overall number of shards involved and on whether there is at least one of them\ returning a non-empty result (see 'pre_filter_shard_size' setting to understand how to control this behaviour).\ Elasticsearch does the rewrite operation on the data node in the so called \"can match\" phase, taking advantage of\ the fact that, at that moment, we can access index mappings and extract information about constant keyword fields\ and their values. This means we still \"fan-out\" search queries from the coordinator node to involved data nodes.\ Rewriting queries based on index mappings is not possible on the coordinator node because the coordinator node is\ missing index mapping information."
+ notable: true diff --git a/docs/changelog/96243.yaml b/docs/changelog/96243.yaml deleted file mode 100644 index fc89aa67ce1c..000000000000 --- a/docs/changelog/96243.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96243 -summary: Support dotted field notations in the reroute processor -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/96251.yaml b/docs/changelog/96251.yaml deleted file mode 100644 index 8405e710a2ad..000000000000 --- a/docs/changelog/96251.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96251 -summary: Avoiding running IO on scheduler thread in `ResourceWatcherService` -area: Watcher -type: bug -issues: [] diff --git a/docs/changelog/96540.yaml b/docs/changelog/96540.yaml deleted file mode 100644 index 4021ede17692..000000000000 --- a/docs/changelog/96540.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96540 -summary: Fix translation of queries involving Version vals -area: SQL -type: bug -issues: - - 96509 diff --git a/docs/changelog/96551.yaml b/docs/changelog/96551.yaml deleted file mode 100644 index 3184ea120869..000000000000 --- a/docs/changelog/96551.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96551 -summary: Make cluster health API cancellable -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/96606.yaml b/docs/changelog/96606.yaml deleted file mode 100644 index 4ff1f498b266..000000000000 --- a/docs/changelog/96606.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96606 -summary: The get data stream api incorrectly prints warning log for upgraded tsdb - data streams -area: TSDB -type: bug -issues: [] diff --git a/docs/changelog/96668.yaml b/docs/changelog/96668.yaml deleted file mode 100644 index 483c0f462743..000000000000 --- a/docs/changelog/96668.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96668 -summary: Fix iteration of empty percentiles throwing Null Pointer Exception -area: Aggregations -type: bug -issues: - - 96626 diff --git a/docs/changelog/96738.yaml b/docs/changelog/96738.yaml deleted file mode 100644 index 
f4fb71d42f81..000000000000 --- a/docs/changelog/96738.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96738 -summary: Ensure NLP model inference queue is always cleared after shutdown or failure -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/96782.yaml b/docs/changelog/96782.yaml deleted file mode 100644 index a3bb799d6340..000000000000 --- a/docs/changelog/96782.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 96782 -summary: Increase concurrent request of opening point-in-time -area: Search -type: bug -issues: [] diff --git a/docs/changelog/96785.yaml b/docs/changelog/96785.yaml deleted file mode 100644 index 9918cbae4663..000000000000 --- a/docs/changelog/96785.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96785 -summary: Adding null check to fix potential NPE -area: Transform -type: enhancement -issues: - - 96781 diff --git a/docs/changelog/96821.yaml b/docs/changelog/96821.yaml deleted file mode 100644 index 60cc5cc35c0e..000000000000 --- a/docs/changelog/96821.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96821 -summary: Change rollup thread pool settings -area: TSDB -type: enhancement -issues: - - 96758 diff --git a/docs/changelog/96843.yaml b/docs/changelog/96843.yaml deleted file mode 100644 index c1f4439bc65a..000000000000 --- a/docs/changelog/96843.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96843 -summary: Uses `ClusterSettings` instead of Node `Settings` in `HealthMetadataService` -area: Health -type: bug -issues: - - 96219 diff --git a/docs/changelog/97041.yaml b/docs/changelog/97041.yaml new file mode 100644 index 000000000000..6bd6f642be26 --- /dev/null +++ b/docs/changelog/97041.yaml @@ -0,0 +1,5 @@ +pr: 97041 +summary: Introduce downsampling configuration for data stream lifecycle +area: Data streams +type: feature +issues: [] diff --git a/docs/changelog/97079.yaml b/docs/changelog/97079.yaml new file mode 100644 index 000000000000..f24096e771a5 --- /dev/null +++ b/docs/changelog/97079.yaml @@ -0,0 +1,5 @@ +pr: 97079 +summary: Enable Serverless API 
protections dynamically +area: Infra/REST API +type: enhancement +issues: [] diff --git a/docs/changelog/97111.yaml b/docs/changelog/97111.yaml new file mode 100644 index 000000000000..cd5810f1cdf0 --- /dev/null +++ b/docs/changelog/97111.yaml @@ -0,0 +1,5 @@ +pr: 97111 +summary: Fix cluster settings update task acknowledgment +area: Cluster Coordination +type: bug +issues: [] diff --git a/docs/changelog/97142.yaml b/docs/changelog/97142.yaml new file mode 100644 index 000000000000..30fcbb337ae0 --- /dev/null +++ b/docs/changelog/97142.yaml @@ -0,0 +1,5 @@ +pr: 97142 +summary: The model loading service should not notify listeners in a sync block +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/97159.yaml b/docs/changelog/97159.yaml new file mode 100644 index 000000000000..ddd7bb928d7b --- /dev/null +++ b/docs/changelog/97159.yaml @@ -0,0 +1,5 @@ +pr: 97159 +summary: Improve exists query rewrite +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/97203.yaml b/docs/changelog/97203.yaml new file mode 100644 index 000000000000..56d9ddd446b7 --- /dev/null +++ b/docs/changelog/97203.yaml @@ -0,0 +1,5 @@ +pr: 97203 +summary: Fix possible NPE when transportversion is null in `MainResponse` +area: Infra/REST API +type: bug +issues: [] diff --git a/docs/changelog/97208.yaml b/docs/changelog/97208.yaml new file mode 100644 index 000000000000..943df2046865 --- /dev/null +++ b/docs/changelog/97208.yaml @@ -0,0 +1,5 @@ +pr: 97208 +summary: Improve match query rewrite +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/97209.yaml b/docs/changelog/97209.yaml new file mode 100644 index 000000000000..ba6f917e618f --- /dev/null +++ b/docs/changelog/97209.yaml @@ -0,0 +1,5 @@ +pr: 97209 +summary: Improve prefix query rewrite +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/97224.yaml b/docs/changelog/97224.yaml new file mode 100644 index 000000000000..50605bd6ad67 --- /dev/null +++ 
b/docs/changelog/97224.yaml @@ -0,0 +1,5 @@ +pr: 97224 +summary: Remove exception wrapping in `BatchedRerouteService` +area: Allocation +type: bug +issues: [] diff --git a/docs/changelog/97234.yaml b/docs/changelog/97234.yaml new file mode 100644 index 000000000000..c4326fcfcc1c --- /dev/null +++ b/docs/changelog/97234.yaml @@ -0,0 +1,5 @@ +pr: 97234 +summary: Add "operator" field to authenticate response +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/97274.yaml b/docs/changelog/97274.yaml new file mode 100644 index 000000000000..537e013024c1 --- /dev/null +++ b/docs/changelog/97274.yaml @@ -0,0 +1,5 @@ +pr: 97274 +summary: Improve model downloader robustness +area: Machine Learning +type: bug +issues: [] diff --git a/docs/plugins/analysis-nori.asciidoc b/docs/plugins/analysis-nori.asciidoc index a28c55553d69..1a3153fa3bea 100644 --- a/docs/plugins/analysis-nori.asciidoc +++ b/docs/plugins/analysis-nori.asciidoc @@ -305,7 +305,7 @@ Which responds with: The `nori_part_of_speech` token filter removes tokens that match a set of part-of-speech tags. The list of supported tags and their meanings can be found here: -{lucene-core-javadoc}/../analyzers-nori/org/apache/lucene/analysis/ko/POS.Tag.html[Part of speech tags] +{lucene-core-javadoc}/../analysis/nori/org/apache/lucene/analysis/ko/POS.Tag.html[Part of speech tags] It accepts the following setting: diff --git a/docs/reference/data-management.asciidoc b/docs/reference/data-management.asciidoc index fa7d10c37863..f189ef4e4e96 100644 --- a/docs/reference/data-management.asciidoc +++ b/docs/reference/data-management.asciidoc @@ -20,17 +20,32 @@ so you can move it to less expensive, less performant hardware. For your oldest data, what matters is that you have access to the data. It's ok if queries take longer to complete. 
-To help you manage your data, {es} enables you to: +To help you manage your data, {es} offers you: +* <> ({ilm-init}) to manage both indices and data streams and it is fully customisable, and +* <> which is the built-in lifecycle of data streams and addresses the most +common lifecycle management needs. + +preview::["The built-in data stream lifecycle is in technical preview and may be changed or removed in a future release. Elastic will apply best effort to fix any issues, but this feature is not subject to the support SLA of official GA features."] + +**{ilm-init}** can be used to manage both indices and data streams and it allows you to: + +* Define the retention period of your data. The retention period is the minimum time your data will be stored in {es}. +Data older than this period can be deleted by {es}. * Define <> of data nodes with different performance characteristics. -* Automatically transition indices through the data tiers according to your performance needs and retention policies -with <> ({ilm-init}). +* Automatically transition indices through the data tiers according to your performance needs and retention policies. * Leverage <> stored in a remote repository to provide resiliency for your older indices while reducing operating costs and maintaining search performance. * Perform <> of data stored on less-performant hardware. + +**Data stream lifecycle** is less feature rich but is focused on simplicity, so it allows you to easily: + +* Define the retention period of your data. The retention period is the minimum time your data will be stored in {es}. +Data older than this period can be deleted by {es} at a later time. +* Improve the performance of your data stream by performing background operations that will optimise the way your data +stream is stored. 
-- include::ilm/index.asciidoc[] include::datatiers.asciidoc[] - diff --git a/docs/reference/data-streams/data-stream-apis.asciidoc b/docs/reference/data-streams/data-stream-apis.asciidoc index 42b42ead04ab..b21a08aa853b 100644 --- a/docs/reference/data-streams/data-stream-apis.asciidoc +++ b/docs/reference/data-streams/data-stream-apis.asciidoc @@ -12,6 +12,14 @@ The following APIs are available for managing <>: * <> * <> +[[data-stream-lifecycle-api]] +The following APIs are available for managing the built-in lifecycle of data streams: + +* <> preview:[] +* <> preview:[] +* <> preview:[] +* <> preview:[] + The following API is available for <>: * <> @@ -33,4 +41,12 @@ include::{es-repo-dir}/data-streams/promote-data-stream-api.asciidoc[] include::{es-repo-dir}/data-streams/modify-data-streams-api.asciidoc[] +include::{es-repo-dir}/data-streams/lifecycle/apis/put-lifecycle.asciidoc[] + +include::{es-repo-dir}/data-streams/lifecycle/apis/get-lifecycle.asciidoc[] + +include::{es-repo-dir}/data-streams/lifecycle/apis/delete-lifecycle.asciidoc[] + +include::{es-repo-dir}/data-streams/lifecycle/apis/explain-lifecycle.asciidoc[] + include::{es-repo-dir}/indices/downsample-data-stream.asciidoc[] diff --git a/docs/reference/data-streams/data-streams.asciidoc b/docs/reference/data-streams/data-streams.asciidoc index 9c8864f42c24..307930d64c4f 100644 --- a/docs/reference/data-streams/data-streams.asciidoc +++ b/docs/reference/data-streams/data-streams.asciidoc @@ -135,3 +135,4 @@ include::set-up-a-data-stream.asciidoc[] include::use-a-data-stream.asciidoc[] include::change-mappings-and-settings.asciidoc[] include::tsds.asciidoc[] +include::lifecycle/index.asciidoc[] diff --git a/docs/reference/dlm/apis/delete-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc similarity index 91% rename from docs/reference/dlm/apis/delete-lifecycle.asciidoc rename to docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc index 
f01992d52226..fd481d7ca481 100644 --- a/docs/reference/dlm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/delete-lifecycle.asciidoc @@ -1,10 +1,10 @@ -[[dlm-delete-lifecycle]] +[[data-streams-delete-lifecycle]] === Delete the lifecycle of a data stream ++++ Delete Data Stream Lifecycle ++++ -experimental::[] +preview::[] Deletes the lifecycle from a set of data streams. @@ -14,18 +14,18 @@ Deletes the lifecycle from a set of data streams. * If the {es} {security-features} are enabled, you must have the `manage_data_stream_lifecycle` index privilege or higher to use this API. For more information, see <>. -[[dlm-delete-lifecycle-request]] +[[data-streams-delete-lifecycle-request]] ==== {api-request-title} `DELETE _data_stream//_lifecycle` -[[dlm-delete-lifecycle-desc]] +[[data-streams-delete-lifecycle-desc]] ==== {api-description-title} Deletes the lifecycle from the specified data streams. If multiple data streams are provided but at least one of them does not exist, then the deletion of the lifecycle will fail for all of them and the API will respond with `404`. -[[dlm-delete-lifecycle-path-params]] +[[data-streams-delete-lifecycle-path-params]] ==== {api-path-parms-title} ``:: @@ -41,7 +41,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ds-expand-wildcards] + Defaults to `open`. 
-[[dlm-delete-lifecycle-example]] +[[data-streams-delete-lifecycle-example]] ==== {api-examples-title} //// diff --git a/docs/reference/dlm/apis/explain-data-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc similarity index 83% rename from docs/reference/dlm/apis/explain-data-lifecycle.asciidoc rename to docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc index 25b75e1a53af..bb685fa10b2b 100644 --- a/docs/reference/dlm/apis/explain-data-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/explain-lifecycle.asciidoc @@ -1,41 +1,39 @@ -[[dlm-explain-lifecycle]] -=== Explain Lifecycle API +[[data-streams-explain-lifecycle]] +=== Explain data stream lifecycle ++++ -Explain Data Lifecycle +Explain Data Stream Lifecycle ++++ -experimental::[] +preview::[] Retrieves the current data lifecycle status for one or more data stream backing indices. [[explain-lifecycle-api-prereqs]] ==== {api-prereq-title} -* Nit: would rephrase as: - If the {es} {security-features} are enabled, you must have at least the `manage_data_stream_lifecycle` index privilege or `view_index_metadata` index privilege to use this API. For more information, see <>. -[[dlm-explain-lifecycle-request]] +[[data-streams-explain-lifecycle-request]] ==== {api-request-title} `GET /_lifecycle/explain` -[[dlm-explain-lifecycle-desc]] +[[data-streams-explain-lifecycle-desc]] ==== {api-description-title} -Retrieves information about the index's current DLM lifecycle state, such as +Retrieves information about the index or data stream's current data stream lifecycle state, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any error that {es} might've encountered during the lifecycle execution. -[[dlm-explain-lifecycle-path-params]] +[[data-streams-explain-lifecycle-path-params]] ==== {api-path-parms-title} ``:: (Required, string) Comma-separated list of indices. 
-[[dlm-explain-lifecycle-query-params]] +[[data-streams-explain-lifecycle-query-params]] ==== {api-query-parms-title} `include_defaults`:: @@ -44,7 +42,7 @@ execution. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] -[[dlm-explain-lifecycle-example]] +[[data-streams-explain-lifecycle-example]] ==== {api-examples-title} The following example retrieves the lifecycle state of the index `.ds-metrics-2023.03.22-000001`: @@ -53,9 +51,9 @@ The following example retrieves the lifecycle state of the index `.ds-metrics-20 -------------------------------------------------- GET .ds-metrics-2023.03.22-000001/_lifecycle/explain -------------------------------------------------- -// TEST[skip:we're not setting up DLM in these tests] +// TEST[skip:we're not setting up data stream lifecycle in these tests] -If the index is managed by DLM `explain` will show the `managed_by_lifecycle` field +If the index is managed by a data stream lifecycle `explain` will show the `managed_by_lifecycle` field set to `true` and the rest of the response will contain information about the lifecycle execution status for this index: @@ -77,8 +75,8 @@ lifecycle execution status for this index: -------------------------------------------------- // TESTRESPONSE[skip:the result is for illustrating purposes only] -<1> Shows if the index is being managed by DLM. If the index is not managed by -DLM the other fields will not be shown +<1> Shows if the index is being managed by data stream lifecycle. 
If the index is not managed by +a data stream lifecycle the other fields will not be shown <2> When the index was created, this timestamp is used to determine when to rollover <3> The time since the index creation (used for calculating when to rollover diff --git a/docs/reference/dlm/apis/get-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc similarity index 91% rename from docs/reference/dlm/apis/get-lifecycle.asciidoc rename to docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc index 5332e2a293b7..64c8dab90ef6 100644 --- a/docs/reference/dlm/apis/get-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/get-lifecycle.asciidoc @@ -1,10 +1,10 @@ -[[dlm-get-lifecycle]] +[[data-streams-get-lifecycle]] === Get the lifecycle of a data stream ++++ Get Data Stream Lifecycle ++++ -experimental::[] +preview::[] Gets the lifecycle of a set of data streams. @@ -15,12 +15,12 @@ Gets the lifecycle of a set of data streams. <>, the `manage_data_stream_lifecycle` index privilege, or the `view_index_metadata` privilege to use this API. For more information, see <>. -[[dlm-get-lifecycle-request]] +[[data-streams-get-lifecycle-request]] ==== {api-request-title} `GET _data_stream//_lifecycle` -[[dlm-get-lifecycle-desc]] +[[data-streams-get-lifecycle-desc]] ==== {api-description-title} Gets the lifecycle of the specified data streams. If multiple data streams are requested but at least one of them @@ -28,7 +28,7 @@ does not exist, then the API will respond with `404` since at least one of the r If the requested data streams do not have a lifecycle configured they will still be included in the API response but the `lifecycle` key will be missing. -[[dlm-get-lifecycle-path-params]] +[[data-streams-get-lifecycle-path-params]] ==== {api-path-parms-title} ``:: @@ -75,12 +75,12 @@ duration the document could be deleted. 
When undefined, every document in this d `rollover`:: (Optional, object) The conditions which will trigger the rollover of a backing index as configured by the cluster setting -`cluster.lifecycle.default.rollover`. This property is an implementation detail and it will only be retrieved when the query -param `include_defaults` is set to `true`. The contents of this field are subject to change. +`cluster.lifecycle.default.rollover`. This property is an implementation detail and it will only be retrieved +when the query param `include_defaults` is set to `true`. The contents of this field are subject to change. ===== ==== -[[dlm-get-lifecycle-example]] +[[data-streams-get-lifecycle-example]] ==== {api-examples-title} //// diff --git a/docs/reference/dlm/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc similarity index 91% rename from docs/reference/dlm/apis/put-lifecycle.asciidoc rename to docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc index 3a77ae984b26..1fe0bdb3ee9c 100644 --- a/docs/reference/dlm/apis/put-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc @@ -1,10 +1,10 @@ -[[dlm-put-lifecycle]] +[[data-streams-put-lifecycle]] === Set the lifecycle of a data stream ++++ Put Data Stream Lifecycle ++++ -experimental::[] +preview::[] Configures the data lifecycle for the targeted data streams. @@ -14,18 +14,18 @@ Configures the data lifecycle for the targeted data streams. If the {es} {security-features} are enabled, you must have the `manage_data_stream_lifecycle` index privilege or higher to use this API. For more information, see <>. -[[dlm-put-lifecycle-request]] +[[data-streams-put-lifecycle-request]] ==== {api-request-title} `PUT _data_stream//_lifecycle` -[[dlm-put-lifecycle-desc]] +[[data-streams-put-lifecycle-desc]] ==== {api-description-title} Configures the data lifecycle for the targeted data streams. 
If multiple data streams are provided but at least one of them does not exist, then the update of the lifecycle will fail for all of them and the API will respond with `404`. -[[dlm-put-lifecycle-path-params]] +[[data-streams-put-lifecycle-path-params]] ==== {api-path-parms-title} ``:: @@ -55,7 +55,7 @@ If defined, every document added to this data stream will be stored at least for duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. ==== -[[dlm-put-lifecycle-example]] +[[data-streams-put-lifecycle-example]] ==== {api-examples-title} The following example sets the lifecycle of `my-data-stream`: diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc new file mode 100644 index 000000000000..9aacf14d8e61 --- /dev/null +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -0,0 +1,64 @@ +[role="xpack"] +[[data-stream-lifecycle]] +== Data stream lifecycle + +preview::[] + +A data stream lifecycle is the built-in mechanism data streams use to manage their lifecycle. It enables you to easily +automate the management of your data streams according to your retention requirements. For example, you could configure +the lifecycle to: + +* Ensure that data indexed in the data stream will be kept at least for the retention time you defined. +* Ensure that data older than the retention period will be deleted automatically by {es} at a later time. + +To achieve that, it supports: + +* Automatic <>, which chunks your incoming data in smaller pieces to facilitate better performance +and backwards incompatible mapping changes. +* Configurable retention, which allows you to configure the time period for which your data is guaranteed to be stored. +{es} is allowed at a later time to delete data older than this time period. + +[discrete] +[[data-streams-lifecycle-how-it-works]] +=== How does it work? 
+ +In intervals configured by <>, {es} goes over +each data stream and performs the following steps: + +1. Checks if the data stream has a data lifecycle configured, skipping any indices not part of a managed data stream. +2. Rolls over the write index of the data stream, if it fulfills the conditions defined by +<>. +3. Applies retention to the remaining backing indices. This means deleting the backing indices whose +`generation_time` is longer than the configured retention period. The `generation_time` is only applicable to rolled over backing +indices and it is either the time since the backing index got rolled over, or the time optionally configured in the +<> setting. + +IMPORTANT: We use the `generation_time` instead of the creation time because this ensures that all data in the backing +index have passed the retention period. As a result, the retention period is not the exact time data gets deleted, but +the minimum time data will be stored. + +NOTE: The steps `2` and `3` apply only to backing indices that are not already managed by {ilm-init}, meaning that these indices either do +not have an {ilm-init} policy defined, or if they do, they have <> +set to `false`. + +[discrete] +[[data-stream-lifecycle-configuration]] +=== Configuring data stream lifecycle + +Since the lifecycle is configured on the data stream level, the process to configure a lifecycle on a new data stream and +on an existing one differ. + +In the following sections, we will go through the following tutorials: + +* To create a new data stream with a lifecycle, you need to add the data lifecycle as part of the index template +that matches the name of your data stream (see <>). When a write operation +with the name of your data stream reaches {es} then the data stream will be created with the respective data lifecycle. +* To update the lifecycle of an existing data stream you need to use the <> +to edit the lifecycle on the data stream itself (see <>). 
+ +NOTE: Updating the data lifecycle of an existing data stream is different from updating the settings or the mapping, +because it is applied on the data stream level and not on the individual backing indices. + +include::tutorial-manage-new-data-stream.asciidoc[] + +include::tutorial-manage-existing-data-stream.asciidoc[] \ No newline at end of file diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc new file mode 100644 index 000000000000..31d6fce2c529 --- /dev/null +++ b/docs/reference/data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc @@ -0,0 +1,136 @@ +[role="xpack"] +[[tutorial-manage-existing-data-stream]] +=== Tutorial: Update existing data stream + +preview::[] + +To update the lifecycle of an existing data stream you do the following actions: + +. <> +. <> + +[discrete] +[[set-lifecycle]] +==== Set a data stream's lifecycle + +To add or to change the retention period of your data stream you can use the <>. + +* You can set infinite retention period, meaning that your data should never be deleted. For example: ++ +[source,console] +---- +PUT _data_stream/my-data-stream/_lifecycle +{ } <1> +---- +// TEST[setup:my_data_stream] +<1> An empty payload means that your data stream is still managed but the data will never be deleted. Managing a time +series data stream such as logs or metrics enables {es} to better store your data even if you do not use a retention period. + +* Or you can set the retention period of your choice. For example: ++ +[source,console] +---- +PUT _data_stream/my-data-stream/_lifecycle +{ + "data_retention": "30d" <1> +} +---- +// TEST[continued] +<1> The retention period of this data stream is set to 30 days. This means that {es} is allowed to delete data that is +older than 30 days at its own discretion. + +The changes in the lifecycle are applied on all backing indices of the data stream. 
You can see the effect of the change +via the <>: + +[source,console] +-------------------------------------------------- +GET .ds-my-data-stream-*/_lifecycle/explain +-------------------------------------------------- +// TEST[continued] + +The response will look like: + +[source,console-result] +-------------------------------------------------- +{ + "indices": { + ".ds-my-data-stream-2023.04.19-000002": { + "index": ".ds-my-data-stream-2023.04.19-000002", <1> + "managed_by_lifecycle": true, <2> + "index_creation_date_millis": 1681919221417, + "time_since_index_creation": "6.85s", <3> + "lifecycle": { + "data_retention": "30d" <4> + } + }, + ".ds-my-data-stream-2023.04.17-000001": { + "index": ".ds-my-data-stream-2023.04.17-000001", <5> + "managed_by_lifecycle": true, <6> + "index_creation_date_millis": 1681745209501, + "time_since_index_creation": "48d", <7> + "rollover_date_millis": 1681919221419, + "time_since_rollover": "6.84s", <8> + "generation_time": "6.84s", <9> + "lifecycle": { + "data_retention": "30d" <10> + } + } + } +} +-------------------------------------------------- +// TEST[continued] +// TESTRESPONSE[skip:the result is for illustrating purposes only] +<1> The name of the backing index. +<2> This index is managed by a data stream lifecycle. +<3> The time that has passed since this index has been created. +<4> The data retention for this index is at least 30 days, as it was recently updated. +<5> The name of the backing index. +<6> This index is managed by the built-in data stream lifecycle. +<7> The time that has passed since this index has been created. +<8> The time that has passed since this index was <>. +<9> The time that will be used to determine when it's safe to delete this index and all its data. +<10> The data retention for this index as well is at least 30 days, as it was recently updated. + +[discrete] +[[delete-lifecycle]] +==== Remove lifecycle for a data stream + +To remove the lifecycle of a data stream you can use the <>. 
As a consequence, +the maintenance operations that were applied by the lifecycle will no longer be applied to the data stream and all its +backing indices. For example: + +[source,console] +-------------------------------------------------- +DELETE _data_stream/my-data-stream/_lifecycle +-------------------------------------------------- +// TEST[continued] + +You can then use the <> again to see that the indices are no longer managed. + +[source,console] +-------------------------------------------------- +GET .ds-my-data-stream-*/_lifecycle/explain +-------------------------------------------------- +// TEST[continued] +// TEST[teardown:data_stream_cleanup] + +[source,console-result] +-------------------------------------------------- +{ + "indices": { + ".ds-my-data-stream-2023.04.19-000002": { + "index": ".ds-my-data-stream-2023.04.19-000002", <1> + "managed_by_lifecycle": false <2> + }, + ".ds-my-data-stream-2023.04.17-000001": { + "index": ".ds-my-data-stream-2023.04.17-000001", <3> + "managed_by_lifecycle": false <4> + } + } +} +-------------------------------------------------- +// TESTRESPONSE[skip:the result is for illustrating purposes only] +<1> The name of the backing index. +<2> Indication that the index is not managed by the data stream lifecycle. +<3> The name of another backing index. +<4> Indication that the index is not managed by the data stream lifecycle. diff --git a/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc new file mode 100644 index 000000000000..5d72709e4766 --- /dev/null +++ b/docs/reference/data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc @@ -0,0 +1,148 @@ +[role="xpack"] +[[tutorial-manage-new-data-stream]] +=== Tutorial: Create a data stream with a lifecycle + +preview::[] + +To create a data stream with a built-in lifecycle, follow these steps: + +. <> +. <> +.
<> + +[discrete] +[[create-index-template-with-lifecycle]] +==== Create an index template + +A data stream requires a matching <>. You can configure the data stream lifecycle by +setting the `lifecycle` field in the index template the same as you do for mappings and index settings. You can define an +index template that sets a lifecycle as follows: + +* Include the `data_stream` object to enable data streams. + +* Define the lifecycle in the template section or include a composable template that defines the lifecycle. + +* Use a priority higher than `200` to avoid collisions with built-in templates. +See <>. + +You can use the <>. + +[source,console] +-------------------------------------------------- +PUT _index_template/my-index-template +{ + "index_patterns": ["my-data-stream*"], + "data_stream": { }, + "priority": 500, + "template": { + "lifecycle": { + "data_retention": "7d" + } + }, + "_meta": { + "description": "Template with data stream lifecycle" + } +} +-------------------------------------------------- + +[discrete] +[[create-data-stream-with-lifecycle]] +==== Create a data stream + +You can create a data stream in two ways: + +. By manually creating the stream using the <>. The stream's name must +still match one of your template's index patterns. ++ +[source,console] +-------------------------------------------------- +PUT _data_stream/my-data-stream +-------------------------------------------------- +// TEST[continued] + +. By <> that +target the stream's name. This name must match one of your index template's index patterns. 
++ +[source,console] +-------------------------------------------------- +PUT my-data-stream/_bulk +{ "create":{ } } +{ "@timestamp": "2099-05-06T16:21:15.000Z", "message": "192.0.2.42 - - [06/May/2099:16:21:15 +0000] \"GET /images/bg.jpg HTTP/1.0\" 200 24736" } +{ "create":{ } } +{ "@timestamp": "2099-05-06T16:25:42.000Z", "message": "192.0.2.255 - - [06/May/2099:16:25:42 +0000] \"GET /favicon.ico HTTP/1.0\" 200 3638" } +-------------------------------------------------- +// TEST[continued] + +[discrete] +[[retrieve-lifecycle-information]] +==== Retrieve lifecycle information + +You can use the <> to see the data lifecycle of your data stream and +the <> to see the exact state of each backing index. + +[source,console] +-------------------------------------------------- +GET _data_stream/my-data-stream/_lifecycle +-------------------------------------------------- +// TEST[continued] + +The result will look like this: + +[source,console-result] +-------------------------------------------------- +{ + "data_streams": [ + { + "name": "my-data-stream",<1> + "lifecycle": { + "data_retention": "7d" <2> + } + } + ] +} +-------------------------------------------------- +<1> The name of your data stream. +<2> The retention period of the data indexed in this data stream, this means that the data in this data stream will +be kept at least for 7 days. After that {es} can delete it at its own discretion. 
+ +If you want to see more information about how the data stream lifecycle is applied on individual backing indices use the +<>: + +[source,console] +-------------------------------------------------- +GET .ds-my-data-stream-*/_lifecycle/explain +-------------------------------------------------- +// TEST[continued] +The result will look like this: + +[source,console-result] +-------------------------------------------------- +{ + "indices": { + ".ds-my-data-stream-2023.04.19-000001": { + "index": ".ds-my-data-stream-2023.04.19-000001", <1> + "managed_by_lifecycle": true, <2> + "index_creation_date_millis": 1681918009501, + "time_since_index_creation": "1.6m", <3> + "lifecycle": { <4> + "data_retention": "7d" + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[skip:the result is for illustrating purposes only] +<1> The name of the backing index. +<2> If it is managed by the built-in data stream lifecycle. +<3> Time since the index was created. +<4> The lifecycle configuration that is applied on this backing index. + +////////////////////////// +[source,console] +-------------------------------------------------- +DELETE _data_stream/my-data-stream +DELETE _index_template/my-index-template +-------------------------------------------------- +// TEST[continued] + +////////////////////////// \ No newline at end of file diff --git a/docs/reference/dlm/apis/dlm-api.asciidoc b/docs/reference/dlm/apis/dlm-api.asciidoc deleted file mode 100644 index f2e4eaa34336..000000000000 --- a/docs/reference/dlm/apis/dlm-api.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -[[data-lifecycle-management-api]] -== Data Lifecycle Management APIs - -You use the following APIs to configure the data lifecycle management for data streams -and to retrieve lifecycle information for backing indices. 
- - [discrete] -[[dlm-api-management-endpoint]] -=== Operation management APIs - -* <> -* <> -* <> -* <> - -include::put-lifecycle.asciidoc[] -include::get-lifecycle.asciidoc[] -include::delete-lifecycle.asciidoc[] -include::explain-data-lifecycle.asciidoc[] - diff --git a/docs/reference/ilm/index-rollover.asciidoc b/docs/reference/ilm/index-rollover.asciidoc index 3755619a6f15..a1616807c9ea 100644 --- a/docs/reference/ilm/index-rollover.asciidoc +++ b/docs/reference/ilm/index-rollover.asciidoc @@ -42,7 +42,7 @@ On each rollover, the new index becomes the write index. [[ilm-automatic-rollover]] === Automatic rollover -{ilm-init} enables you to automatically roll over to a new index based +{ilm-init} and the data stream lifecycle (in preview:[]) enable you to automatically roll over to a new index based on conditions like the index size, document count, or age. When a rollover is triggered, a new index is created, the write alias is updated to point to the new index, and all subsequent updates are written to the new index. diff --git a/docs/reference/indices/get-component-template.asciidoc b/docs/reference/indices/get-component-template.asciidoc index 9e2ac1a798f8..f3073406be2b 100644 --- a/docs/reference/indices/get-component-template.asciidoc +++ b/docs/reference/indices/get-component-template.asciidoc @@ -72,7 +72,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=local] include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] `include_defaults`:: -(Optional, Boolean) Functionality in experimental:[]. If `true`, return all default settings in the response. +(Optional, Boolean) Functionality in preview:[]. If `true`, return all default settings in the response. Defaults to `false`.
[[get-component-template-api-example]] diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc index 255d2c803984..2b7a1646b800 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -100,7 +100,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ds-expand-wildcards] Defaults to `open`. `include_defaults`:: -(Optional, Boolean) Functionality in experimental:[]. If `true`, return all default settings in the response. +(Optional, Boolean) Functionality in preview:[]. If `true`, return all default settings in the response. Defaults to `false`. [role="child_attributes"] @@ -223,7 +223,7 @@ cluster can not write into this data stream or change its mappings. `lifecycle`:: (object) -Functionality in experimental:[]. Contains the configuration for the data lifecycle management of this data stream. +Functionality in preview:[]. Contains the configuration for the data lifecycle management of this data stream. + .Properties of `lifecycle` [%collapsible%open] diff --git a/docs/reference/indices/get-index-template.asciidoc b/docs/reference/indices/get-index-template.asciidoc index 1752b19e59d9..9ae8af6f8441 100644 --- a/docs/reference/indices/get-index-template.asciidoc +++ b/docs/reference/indices/get-index-template.asciidoc @@ -64,7 +64,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=local] include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] `include_defaults`:: -(Optional, Boolean) Functionality in experimental:[]. If `true`, return all default settings in the response. +(Optional, Boolean) Functionality in preview:[]. If `true`, return all default settings in the response. Defaults to `false`. 
[[get-template-api-example]] diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc index 316eb45866f4..79394fccd046 100644 --- a/docs/reference/indices/shard-stores.asciidoc +++ b/docs/reference/indices/shard-stores.asciidoc @@ -172,8 +172,8 @@ The API returns the following response: "attributes": {}, "roles": [...], "version": "8.10.0", - "minIndexVersion": "7000099", - "maxIndexVersion": "8100099" + "min_index_version": 7000099, + "max_index_version": 8100099 }, "allocation_id": "2iNySv_OQVePRX-yaRH_lQ", <4> "allocation" : "primary|replica|unused" <5> diff --git a/docs/reference/indices/simulate-index.asciidoc b/docs/reference/indices/simulate-index.asciidoc index d4c446a58eec..5e5709a2d82f 100644 --- a/docs/reference/indices/simulate-index.asciidoc +++ b/docs/reference/indices/simulate-index.asciidoc @@ -63,7 +63,7 @@ Name of the index to simulate. include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] `include_defaults`:: -(Optional, Boolean) Functionality in experimental:[]. If `true`, return all default settings in the response. +(Optional, Boolean) Functionality in preview:[]. If `true`, return all default settings in the response. Defaults to `false`. [role="child_attributes"] diff --git a/docs/reference/indices/simulate-template.asciidoc b/docs/reference/indices/simulate-template.asciidoc index e876b2b9c519..404aa70d72e7 100644 --- a/docs/reference/indices/simulate-template.asciidoc +++ b/docs/reference/indices/simulate-template.asciidoc @@ -95,7 +95,7 @@ Defaults to `false`. include::{es-ref-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] `include_defaults`:: -(Optional, Boolean) Functionality in experimental:[]. If `true`, return all default settings in the response. +(Optional, Boolean) Functionality in preview:[]. If `true`, return all default settings in the response. Defaults to `false`. 
[role="child_attributes"] diff --git a/docs/reference/migration/apis/feature-migration.asciidoc b/docs/reference/migration/apis/feature-migration.asciidoc index ab314ff79eab..87903fbb7758 100644 --- a/docs/reference/migration/apis/feature-migration.asciidoc +++ b/docs/reference/migration/apis/feature-migration.asciidoc @@ -56,85 +56,85 @@ Example response: "features" : [ { "feature_name" : "async_search", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "enrich", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "ent_search", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "fleet", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "geoip", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "kibana", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "logstash_management", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "machine_learning", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "searchable_snapshots", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "security", - 
"minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "synonyms", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "tasks", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "transform", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "watcher", - "minimum_index_version" : "{version}", + "minimum_index_version" : "8100099", "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] } @@ -142,7 +142,7 @@ Example response: "migration_status" : "NO_MIGRATION_NEEDED" } -------------------------------------------------- - +// TESTRESPONSE[s/"minimum_index_version" : "8100099"/"minimum_index_version" : $body.$_path/] When you submit a POST request to the `_migration/system_features` endpoint to start the migration process, the response indicates what features will be diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 9fc19576a9dd..9feffa179378 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -10,10 +10,12 @@ _leader_ index is replicated to one or more read-only _follower_ indices on your configure disaster recovery, bring data closer to your users, or establish a centralized reporting cluster to process reports locally. -<> enables you to run a search request -against one or more remote clusters. This capability provides each region -with a global view of all clusters, allowing you to send a search request from -a local cluster and return results from all connected remote clusters. 
+<> enables you to run a search request +against one or more remote clusters. This capability provides each region with a +global view of all clusters, allowing you to send a search request from a local +cluster and return results from all connected remote clusters. For full {ccs} +capabilities, the local and remote cluster must be on the same +{subscriptions}[subscription level]. Enabling and configuring security is important on both local and remote clusters. When connecting a local cluster to remote clusters, an {es} superuser diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 7000818aa934..47140e93ad98 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1882,6 +1882,26 @@ Refer to <> for other Watcher examples. Refer to <>. +[role="exclude",id="dlm-delete-lifecycle"] +=== Delete the lifecycle of a data stream + +Refer to <>. + +[role="exclude",id="dlm-explain-lifecycle"] +=== Explain the lifecycle of a data stream + +Refer to <>. + +[role="exclude",id="dlm-get-lifecycle"] +=== Get the lifecycle of a data stream + +Refer to <>. + +[role="exclude",id="dlm-put-lifecycle"] +=== Update the lifecycle of a data stream + +Refer to <>. + [role="exclude",id="get-synonym-rule"] === Get synonym rule API diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index c67f83aa5f99..6245df668665 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -20,7 +20,6 @@ not be included yet. 
* <> * <> * <> -* <> * <> * <> * <> @@ -98,4 +97,3 @@ include::{es-repo-dir}/transform/apis/index.asciidoc[] include::usage.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] include::defs.asciidoc[] -include::{es-repo-dir}/dlm/apis/dlm-api.asciidoc[] diff --git a/docs/reference/search-application/apis/get-search-application.asciidoc b/docs/reference/search-application/apis/get-search-application.asciidoc index 7c809e6c0eca..0feba6145640 100644 --- a/docs/reference/search-application/apis/get-search-application.asciidoc +++ b/docs/reference/search-application/apis/get-search-application.asciidoc @@ -8,7 +8,10 @@ beta::[] Get Search Application ++++ -Retrieves information about a Search Application. +Retrieves information about a search application. + +If the search application has an inconsistent state between its alias and configured indices, a warning header will be returned with the response. +To resolve this inconsistent state, issue an updated <> command. [[get-search-application-request]] ==== {api-request-title} diff --git a/docs/reference/search-application/apis/search-application-search.asciidoc b/docs/reference/search-application/apis/search-application-search.asciidoc index 8c9d750d089c..eca006353e94 100644 --- a/docs/reference/search-application/apis/search-application-search.asciidoc +++ b/docs/reference/search-application/apis/search-application-search.asciidoc @@ -11,6 +11,9 @@ beta::[] Given specified query parameters, creates an Elasticsearch query to run. Any unspecified template parameters will be assigned their default values if applicable. +If the search application has an inconsistent state between its alias and configured indices, a warning header will be returned with the response. +To resolve this inconsistent state, issue an updated <> command. 
+ [[search-application-search-request]] ==== {api-request-title} diff --git a/docs/reference/search/rrf.asciidoc b/docs/reference/search/rrf.asciidoc index b2a23e8cd725..2137358a00de 100644 --- a/docs/reference/search/rrf.asciidoc +++ b/docs/reference/search/rrf.asciidoc @@ -32,10 +32,11 @@ return score ==== Reciprocal rank fusion API You can use RRF as part of a <> to combine and rank -documents using multiple result sets from - -* 1 query and 1 or more kNN searches -* 2 or more kNN searches +documents using result sets from a combination of +<>, +<>, and/or +<>. A minimum of 2 results sets +is required for ranking from the specified sources. The `rrf` parameter is an optional object defined as part of a search request's <>. The `rrf` object contains the following @@ -95,8 +96,9 @@ truncated to `window_size`. If `k` is smaller than `window_size`, the results ar RRF does support: -* <> +* <> * <> +* <> RRF does not currently support: @@ -110,9 +112,61 @@ RRF does not currently support: * <> * <> -Using unsupported features as part of a search using RRF will result +Using unsupported features as part of a search with RRF results in an exception. +[[rrf-using-sub-searches]] +==== Reciprocal rank fusion using sub searches + +<> provides a way to +combine and rank multiple searches using RRF. + +An example request using RRF with sub searches: + +[source,console] +---- +GET example-index/_search +{ + "sub_searches": [ + { + "query": { + "term": { + "text": "blue shoes sale" + } + } + }, + { + "query": { + "text_expansion":{ + "ml.tokens":{ + "model_id":"my_elser_model", + "model_text":"What blue shoes are on sale?" + } + } + } + } + ], + "rank": { + "rrf": { + "window_size": 50, + "rank_constant": 20 + } + } +} +---- +// TEST[skip:example fragment] + +In the above example, we execute each of the two sub searches +independently of each other. First we run the term query for +`blue shoes sales` using the standard BM25 scoring algorithm. 
Then +we run the text expansion query for `What blue shoes are on sale?` +using our <> scoring algorithm. +RRF allows us to combine the two results sets generated by completely +independent scoring algorithms with equal weighting. Not only does this +remove the need to figure out what the appropriate weighting would be +using linear combination, but RRF is also shown to give improved +relevance over either query individually. + [[rrf-full-example]] ==== Reciprocal rank fusion full example diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 8f634bee2645..bdaa703d98d5 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -32,6 +32,9 @@ run {es} on your own hardware, see <>. To ensure your remote cluster configuration supports {ccs}, see <>. +* For full {ccs} capabilities, the local and remote cluster must be on the same +{subscriptions}[subscription level]. + * The local coordinating node must have the <> node role. diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 96c2c4dd8652..72f316cdd972 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -556,10 +556,10 @@ Period of time used to extend the life of the PIT. [[request-body-rank]] `rank`:: (Optional, object) Defines a method for combining and ranking result sets from -either: -+ -* 1 query and 1 or more kNN searches -* 2 or more kNN searches +a combination of <>, +<>, and/or +<>. Requires a minimum of 2 results sets for +ranking from the specified sources. + .Ranking methods [%collapsible%open] @@ -715,6 +715,29 @@ Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the <>. 
+[[request-body-sub-searches]] +`sub_searches`:: +(Optional, array of objects) +An array of `sub_search` objects where each `sub_search` is evaluated +independently, and their result sets are later combined as part of +<>. Each `sub_search` object is required to +contain a single `query`. `sub_searches` is only allowed with the +<> element, and is not allowed in conjunction +with a top-level <> element. ++ +`sub_searches` as part of a search: +[source,js] +---- +{ + "sub_searches": [ + { "query": {...} }, + { "query": {...} } + ] + ... +} +---- +// NOTCONSOLE + include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=terminate_after] + Defaults to `0`, which does not terminate query execution early. diff --git a/docs/reference/settings/data-stream-lifecycle-settings.asciidoc b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc new file mode 100644 index 000000000000..8ca60b75e282 --- /dev/null +++ b/docs/reference/settings/data-stream-lifecycle-settings.asciidoc @@ -0,0 +1,51 @@ +[role="xpack"] +[[data-stream-lifecycle-settings]] +=== Data stream lifecycle settings in {es} +[subs="attributes"] +++++ +Data stream lifecycle settings +++++ + +preview::[] + +These are the settings available for configuring <>. + +==== Cluster level settings + +[[data-streams-lifecycle-poll-interval]] +`data_streams.lifecycle.poll_interval`:: +(<>, <>) +How often {es} checks what is the next action for all data streams with a built-in lifecycle. Defaults to `10m`. + +[[cluster-lifecycle-default-rollover]] +`cluster.lifecycle.default.rollover`:: +(<>, string) +This property accepts a key value pair formatted string and configures the conditions that would trigger a data stream +to <> when it has `lifecycle` configured. This property is an implementation detail and subject to +change. 
Currently, it defaults to `max_age=auto,max_primary_shard_size=50gb,min_docs=1,max_primary_shard_docs=200000000`, +this means that your data stream will rollover if any of the following conditions are met: + +* Either any primary shard reaches the size of 50GB, +* or any primary shard contains 200,000,000 documents +* or the index reaches a certain age which depends on the retention time of your data stream, +* **and** has at least one document. + +==== Index level settings +The following index-level settings are typically configured on the backing indices of a data stream. + +[[index-lifecycle-prefer-ilm]] +`index.lifecycle.prefer_ilm`:: +(<>, boolean) +This setting determines which feature is managing the backing index of a data stream if, and only if, the backing index +has an <> ({ilm-init}) policy and the data stream also has a built-in lifecycle. When +`true` this index is managed by {ilm-init}, when `false` the backing index is managed by the data stream lifecycle. +Defaults to `true`. + +[[index-data-stream-lifecycle-origination-date]] +`index.lifecycle.origination_date`:: +(<>, long) +If specified, this is the timestamp used to calculate the backing index generation age after this backing index has been +<>. The generation age is used to determine data retention, consequently, you can use this +setting if you create a backing index that contains older data and want to ensure that the retention period or +other parts of the lifecycle will be applied based on the data's original timestamp and not the timestamp they got +indexed. Specified as a Unix epoch value in milliseconds.
\ No newline at end of file diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index 15482679a306..e007b67a943b 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -54,6 +54,8 @@ include::settings/health-diagnostic-settings.asciidoc[] include::settings/ilm-settings.asciidoc[] +include::settings/data-stream-lifecycle-settings.asciidoc[] + include::modules/indices/index_management.asciidoc[] include::modules/indices/recovery.asciidoc[] diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 6c270adc293c..7d0b05a39278 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -521,16 +521,16 @@ - - - - - + + + + + @@ -791,16 +791,16 @@ + + + + + - - - - - diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java index 30841ee36d41..32347297fea7 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java @@ -403,7 +403,7 @@ public void declareFieldArray( */ public abstract void declareExclusiveFieldSet(String... 
exclusiveSet); - private static List parseArray(XContentParser parser, Context context, ContextParser itemParser) + public static List parseArray(XContentParser parser, Context context, ContextParser itemParser) throws IOException { final XContentParser.Token currentToken = parser.currentToken(); if (currentToken.isValue() diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 0ceb4cb50ce3..3d7870dec5f2 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -121,6 +121,7 @@ import org.elasticsearch.index.analysis.PreConfiguredTokenizer; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider; import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; import org.elasticsearch.lucene.analysis.miscellaneous.DisableGraphAttribute; @@ -170,7 +171,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { this.scriptServiceHolder.set(scriptService); this.synonymsManagementServiceHolder.set(new SynonymsManagementAPIService(client)); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java index b3a0bbfea15f..c55f8d36ee4f 100644 --- 
a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java @@ -67,7 +67,7 @@ public FactoryType compile(Script script, ScriptContext FactoryType compile(Script script, ScriptContext createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer unused, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { final APMTracer apmTracer = tracer.get(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index b07ab4c5d5c0..f67e80766aba 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -46,6 +46,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettingProvider; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -117,7 +118,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { var service = new UpdateTimeSeriesRangeService(environment.settings(), threadPool, clusterService); this.service.set(service); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java index 05d33304e507..324b70aee080 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java @@ -220,7 +220,7 @@ public void setup() throws Exception { null, ScriptCompiler.NONE, false, - IndexVersion.CURRENT + IndexVersion.current() ).build(MapperBuilderContext.root(false)); RootObjectMapper.Builder root = new RootObjectMapper.Builder("_doc", ObjectMapper.Defaults.SUBOBJECTS); root.add( @@ -230,7 +230,7 @@ public void setup() throws Exception { DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, ScriptCompiler.NONE, true, - IndexVersion.CURRENT + IndexVersion.current() ) ); MetadataFieldMapper dtfm = DataStreamTestHelper.getDataStreamTimestampFieldMapper(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index 9071812e965e..e60b5daab737 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.datastreams; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; @@ -20,14 +21,18 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.indices.EmptySystemIndices; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexTemplateException; import org.elasticsearch.indices.ShardLimitValidator; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ESSingleNodeTestCase; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Set; @@ -151,34 +156,33 @@ public void testLifecycleComposition() { } // One lifecycle results to this lifecycle as the final { - DataLifecycle lifecycle = switch (randomInt(2)) { - case 0 -> new DataLifecycle(); - case 1 -> new DataLifecycle(DataLifecycle.Retention.NULL); - default -> new DataLifecycle(randomMillisUpToYear9999()); - }; + DataLifecycle lifecycle = new DataLifecycle(randomRetention(), randomDownsampling()); List lifecycles = List.of(lifecycle); - assertThat(composeDataLifecycles(lifecycles), equalTo(lifecycle)); + DataLifecycle result = composeDataLifecycles(lifecycles); + assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle.getEffectiveDataRetention())); + assertThat(result.getDownsamplingRounds(), equalTo(lifecycle.getDownsamplingRounds())); } // If the last lifecycle is missing a property we keep the latest from the previous ones { - DataLifecycle lifecycleWithRetention = new DataLifecycle(randomMillisUpToYear9999()); - List lifecycles = List.of(lifecycleWithRetention, new DataLifecycle()); - assertThat( - composeDataLifecycles(lifecycles).getEffectiveDataRetention(), - equalTo(lifecycleWithRetention.getEffectiveDataRetention()) - ); + DataLifecycle lifecycle = new DataLifecycle(randomNonEmptyRetention(), randomNonEmptyDownsampling()); + List lifecycles = List.of(lifecycle, new DataLifecycle()); + DataLifecycle result = composeDataLifecycles(lifecycles); + 
assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle.getEffectiveDataRetention())); + assertThat(result.getDownsamplingRounds(), equalTo(lifecycle.getDownsamplingRounds())); } // If both lifecycle have all properties, then the latest one overwrites all the others { - DataLifecycle lifecycle1 = new DataLifecycle(randomMillisUpToYear9999()); - DataLifecycle lifecycle2 = new DataLifecycle(randomMillisUpToYear9999()); + DataLifecycle lifecycle1 = new DataLifecycle(randomNonEmptyRetention(), randomNonEmptyDownsampling()); + DataLifecycle lifecycle2 = new DataLifecycle(randomNonEmptyRetention(), randomNonEmptyDownsampling()); List lifecycles = List.of(lifecycle1, lifecycle2); - assertThat(composeDataLifecycles(lifecycles), equalTo(lifecycle2)); + DataLifecycle result = composeDataLifecycles(lifecycles); + assertThat(result.getEffectiveDataRetention(), equalTo(lifecycle2.getEffectiveDataRetention())); + assertThat(result.getDownsamplingRounds(), equalTo(lifecycle2.getDownsamplingRounds())); } // If the last lifecycle is explicitly null, the result is also null { - DataLifecycle lifecycle1 = new DataLifecycle(randomMillisUpToYear9999()); - DataLifecycle lifecycle2 = new DataLifecycle(randomMillisUpToYear9999()); + DataLifecycle lifecycle1 = new DataLifecycle(randomNonEmptyRetention(), randomNonEmptyDownsampling()); + DataLifecycle lifecycle2 = new DataLifecycle(randomNonEmptyRetention(), randomNonEmptyDownsampling()); List lifecycles = List.of(lifecycle1, lifecycle2, Template.NO_LIFECYCLE); assertThat(composeDataLifecycles(lifecycles), nullValue()); } @@ -224,4 +228,49 @@ public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNo return new ShardLimitValidator(limitOnlySettings, clusterService); } + @Nullable + private static DataLifecycle.Retention randomRetention() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Retention.NULL; + default -> randomNonEmptyRetention(); + }; + } + + private static 
DataLifecycle.Retention randomNonEmptyRetention() { + return new DataLifecycle.Retention(TimeValue.timeValueMillis(randomMillisUpToYear9999())); + } + + @Nullable + private static DataLifecycle.Downsampling randomDownsampling() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Downsampling.NULL; + default -> randomNonEmptyDownsampling(); + }; + } + + private static DataLifecycle.Downsampling randomNonEmptyDownsampling() { + var count = randomIntBetween(0, 10); + List rounds = new ArrayList<>(); + var previous = new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(randomIntBetween(1, 365)), + new DownsampleConfig(new DateHistogramInterval(randomIntBetween(1, 24) + "h")) + ); + rounds.add(previous); + for (int i = 0; i < count; i++) { + DataLifecycle.Downsampling.Round round = nextRound(previous); + rounds.add(round); + previous = round; + } + return new DataLifecycle.Downsampling(rounds); + } + + private static DataLifecycle.Downsampling.Round nextRound(DataLifecycle.Downsampling.Round previous) { + var after = TimeValue.timeValueDays(previous.after().days() + randomIntBetween(1, 10)); + var fixedInterval = new DownsampleConfig( + new DateHistogramInterval((previous.config().getFixedInterval().estimateMillis() * randomIntBetween(2, 5)) + "ms") + ); + return new DataLifecycle.Downsampling.Round(after, fixedInterval); + } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java index 7c6f29bfb21e..ab75aa3c1ca5 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java @@ -170,7 +170,7 @@ public void testValidateDefaultIgnoreMalformed() throws Exception { Settings 
indexSettings = Settings.builder().put(FieldMapper.IGNORE_MALFORMED_SETTING.getKey(), true).build(); Exception e = expectThrows( IllegalArgumentException.class, - () -> createMapperService(IndexVersion.CURRENT, indexSettings, () -> true, timestampMapping(true, b -> { + () -> createMapperService(IndexVersion.current(), indexSettings, () -> true, timestampMapping(true, b -> { b.startObject("@timestamp"); b.field("type", "date"); b.endObject(); @@ -181,7 +181,7 @@ public void testValidateDefaultIgnoreMalformed() throws Exception { equalTo("data stream timestamp field [@timestamp] has disallowed [ignore_malformed] attribute specified") ); - MapperService mapperService = createMapperService(IndexVersion.CURRENT, indexSettings, () -> true, timestampMapping(true, b -> { + MapperService mapperService = createMapperService(IndexVersion.current(), indexSettings, () -> true, timestampMapping(true, b -> { b.startObject("@timestamp"); b.field("type", "date"); b.field("ignore_malformed", false); diff --git a/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/CrudDataLifecycleIT.java b/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/CrudDataLifecycleIT.java index c161bd62aed8..e1326e011721 100644 --- a/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/CrudDataLifecycleIT.java +++ b/modules/dlm/src/internalClusterTest/java/org/elasticsearch/dlm/CrudDataLifecycleIT.java @@ -24,7 +24,7 @@ import java.util.List; import static org.elasticsearch.dlm.DLMFixtures.putComposableIndexTemplate; -import static org.elasticsearch.dlm.DLMFixtures.randomDataLifecycle; +import static org.elasticsearch.dlm.DLMFixtures.randomLifecycle; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -42,7 +42,7 @@ protected boolean ignoreExternalCluster() { } public void testGetLifecycle() throws Exception { - DataLifecycle lifecycle = randomDataLifecycle(); + DataLifecycle lifecycle = 
randomLifecycle(); putComposableIndexTemplate("id1", null, List.of("with-lifecycle*"), null, null, lifecycle); putComposableIndexTemplate("id2", null, List.of("without-lifecycle*"), null, null, null); { diff --git a/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecyclePlugin.java b/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecyclePlugin.java index 5035bb157a0c..8ee0e09319f8 100644 --- a/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecyclePlugin.java +++ b/modules/dlm/src/main/java/org/elasticsearch/dlm/DataLifecyclePlugin.java @@ -40,6 +40,7 @@ import org.elasticsearch.dlm.rest.RestPutDataLifecycleAction; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -58,7 +59,7 @@ import java.util.List; import java.util.function.Supplier; -import static org.elasticsearch.cluster.metadata.DataLifecycle.DLM_ORIGIN; +import static org.elasticsearch.cluster.metadata.DataLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN; /** * Plugin encapsulating Data Lifecycle Management Service. 
@@ -97,7 +98,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { if (DataLifecycle.isEnabled() == false) { return List.of(); @@ -107,7 +109,7 @@ public Collection createComponents( dataLifecycleInitialisationService.set( new DataLifecycleService( settings, - new OriginSettingClient(client, DLM_ORIGIN), + new OriginSettingClient(client, DATA_STREAM_LIFECYCLE_ORIGIN), clusterService, getClock(), threadPool, diff --git a/modules/dlm/src/test/java/org/elasticsearch/dlm/DLMFixtures.java b/modules/dlm/src/test/java/org/elasticsearch/dlm/DLMFixtures.java index f2ab7fbd5500..cc517d785283 100644 --- a/modules/dlm/src/test/java/org/elasticsearch/dlm/DLMFixtures.java +++ b/modules/dlm/src/test/java/org/elasticsearch/dlm/DLMFixtures.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataLifecycle; import org.elasticsearch.cluster.metadata.DataStream; @@ -22,16 +23,19 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import static org.apache.lucene.tests.util.LuceneTestCase.rarely; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; import static org.elasticsearch.test.ESIntegTestCase.client; import static 
org.elasticsearch.test.ESTestCase.randomInt; import static org.elasticsearch.test.ESTestCase.randomIntBetween; +import static org.elasticsearch.test.ESTestCase.randomMillisUpToYear9999; import static org.junit.Assert.assertTrue; /** @@ -93,12 +97,47 @@ static void putComposableIndexTemplate( assertTrue(client().execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet().isAcknowledged()); } - static DataLifecycle randomDataLifecycle() { - return switch (randomInt(3)) { - case 0 -> new DataLifecycle(); - case 1 -> new DataLifecycle(DataLifecycle.Retention.NULL); - case 2 -> Template.NO_LIFECYCLE; - default -> new DataLifecycle(TimeValue.timeValueDays(randomIntBetween(1, 365))); + static DataLifecycle randomLifecycle() { + return rarely() ? Template.NO_LIFECYCLE : new DataLifecycle(randomRetention(), randomDownsampling()); + } + + @Nullable + private static DataLifecycle.Retention randomRetention() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Retention.NULL; + default -> new DataLifecycle.Retention(TimeValue.timeValueMillis(randomMillisUpToYear9999())); }; } + + @Nullable + private static DataLifecycle.Downsampling randomDownsampling() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Downsampling.NULL; + default -> { + var count = randomIntBetween(0, 10); + List rounds = new ArrayList<>(); + var previous = new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(randomIntBetween(1, 365)), + new DownsampleConfig(new DateHistogramInterval(randomIntBetween(1, 24) + "h")) + ); + rounds.add(previous); + for (int i = 0; i < count; i++) { + DataLifecycle.Downsampling.Round round = nextRound(previous); + rounds.add(round); + previous = round; + } + yield new DataLifecycle.Downsampling(rounds); + } + }; + } + + private static DataLifecycle.Downsampling.Round nextRound(DataLifecycle.Downsampling.Round previous) { + var after = TimeValue.timeValueDays(previous.after().days() + randomIntBetween(1, 
10)); + var fixedInterval = new DownsampleConfig( + new DateHistogramInterval((previous.config().getFixedInterval().estimateMillis() * randomIntBetween(2, 5)) + "ms") + ); + return new DataLifecycle.Downsampling.Round(after, fixedInterval); + } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index 45d24fb75cda..0985f3490d22 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.Processor; @@ -121,7 +122,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { try { String nodeId = nodeEnvironment.nodeId(); diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index b09c406857a3..d32848b529fd 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.script.ScriptType; import 
org.elasticsearch.script.mustache.MultiSearchTemplateResponse.Item; import org.elasticsearch.search.DummyQueryParserPlugin; +import org.elasticsearch.search.FailBeforeCurrentVersionQueryBuilder; import org.elasticsearch.search.SearchService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.json.JsonXContent; @@ -200,9 +201,14 @@ public void testCCSCheckCompatibility() throws Exception { Exception ex = response.getFailure(); assertThat(ex.getMessage(), containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version")); assertThat(ex.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); - assertEquals( - "This query isn't serializable with transport versions before " + TransportVersion.current(), - ex.getCause().getMessage() + + String expectedCause = Strings.format( + "[fail_before_current_version] was released first in version %s, failed compatibility " + + "check trying to send it to node with version %s", + FailBeforeCurrentVersionQueryBuilder.FUTURE_VERSION, + TransportVersion.MINIMUM_CCS_VERSION ); + String actualCause = ex.getCause().getMessage(); + assertEquals(expectedCause, actualCause); } } diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index c2d48fccf27d..76605267e0e1 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import 
org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.search.SearchRequest; @@ -365,14 +364,22 @@ public void testCCSCheckCompatibility() throws Exception { ExecutionException.class, () -> client().execute(SearchTemplateAction.INSTANCE, request).get() ); + + Throwable primary = ex.getCause(); + assertNotNull(primary); + + Throwable underlying = primary.getCause(); + assertNotNull(underlying); + assertThat( - ex.getCause().getMessage(), + primary.getMessage(), containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version") ); - assertThat(ex.getCause().getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); - assertEquals( - "This query isn't serializable with transport versions before " + TransportVersion.current(), - ex.getCause().getCause().getMessage() - ); + assertThat(primary.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); + + String expectedCause = "[fail_before_current_version] was released first in version XXXXXXX, failed compatibility check trying to" + + " send it to node with version XXXXXXX"; + String actualCause = underlying.getMessage().replaceAll("\\d{7,}", "XXXXXXX"); + assertEquals(expectedCause, actualCause); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index f3b7803bf2c5..f5b27a48960d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.painless.action.PainlessContextAction; import 
org.elasticsearch.painless.action.PainlessExecuteAction; import org.elasticsearch.painless.spi.PainlessExtension; @@ -147,7 +148,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { // this is a hack to bind the painless script engine in guice (all components are added to guice), so that // the painless context api. this is a temporary measure until transport actions do no require guice diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java index 16eb8966e680..1a006d76392d 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoWKTShapeParserTests.java @@ -293,7 +293,7 @@ public void testParseMixedDimensionPolyWithHole() throws IOException, ParseExcep XContentParser parser = createParser(xContentBuilder); parser.nextToken(); - final LegacyGeoShapeFieldMapper mapperBuilder = new LegacyGeoShapeFieldMapper.Builder("test", IndexVersion.CURRENT, false, true) + final LegacyGeoShapeFieldMapper mapperBuilder = new LegacyGeoShapeFieldMapper.Builder("test", IndexVersion.current(), false, true) .build(MapperBuilderContext.root(false)); // test store z disabled @@ -376,7 +376,7 @@ public void testParseOpenPolygon() throws IOException { final LegacyGeoShapeFieldMapper coercingMapperBuilder = new LegacyGeoShapeFieldMapper.Builder( "test", - IndexVersion.CURRENT, + IndexVersion.current(), false, true ).coerce(true).build(MapperBuilderContext.root(false)); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java 
b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index 0d8429122190..d89c5db66f37 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -98,7 +98,7 @@ public static class Builder extends FieldMapper.Builder { private final TextParams.Analyzers analyzers; public Builder(String name, IndexAnalyzers indexAnalyzers) { - this(name, IndexVersion.CURRENT, indexAnalyzers); + this(name, IndexVersion.current(), indexAnalyzers); } public Builder(String name, IndexVersion indexCreatedVersion, IndexAnalyzers indexAnalyzers) { diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 10168aee9366..ca6e5a8078f6 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -368,7 +368,7 @@ public void testNonDefaultSimilarity() throws Exception { ScoreMode.None ); LateParsingQuery query = (LateParsingQuery) hasChildQueryBuilder.toQuery(searchExecutionContext); - Similarity expected = SimilarityService.BUILT_IN.get(similarity).apply(Settings.EMPTY, IndexVersion.CURRENT, null); + Similarity expected = SimilarityService.BUILT_IN.get(similarity).apply(Settings.EMPTY, IndexVersion.current(), null); assertThat(((PerFieldSimilarityWrapper) query.getSimilarity()).get("custom_string"), instanceOf(expected.getClass())); } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 65f725b126f8..9b214f432418 100644 --- 
a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -827,7 +827,7 @@ public void testPercolateMatchAll() throws Exception { Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, - IndexVersion.CURRENT + IndexVersion.current() ); TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); assertEquals(3L, topDocs.totalHits.value); @@ -866,7 +866,7 @@ public void testFunctionScoreQuery() throws Exception { Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, - IndexVersion.CURRENT + IndexVersion.current() ); TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC)); assertEquals(2L, topDocs.totalHits.value); @@ -895,7 +895,7 @@ public void testPercolateSmallAndLargeDocument() throws Exception { IndexSearcher shardSearcher = newSearcher(directoryReader); shardSearcher.setQueryCache(null); - IndexVersion v = IndexVersion.CURRENT; + IndexVersion v = IndexVersion.current(); try (Directory directory = new ByteBuffersDirectory()) { try (IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig())) { @@ -1018,7 +1018,7 @@ public void testDuplicatedClauses() throws Exception { IndexSearcher shardSearcher = newSearcher(directoryReader); shardSearcher.setQueryCache(null); - IndexVersion v = IndexVersion.CURRENT; + IndexVersion v = IndexVersion.current(); List sources = Collections.singletonList(new BytesArray("{}")); MemoryIndex memoryIndex = new MemoryIndex(); @@ -1052,7 +1052,7 @@ public void testDuplicatedClauses2() throws Exception { IndexSearcher shardSearcher = newSearcher(directoryReader); shardSearcher.setQueryCache(null); - IndexVersion v = IndexVersion.CURRENT; + IndexVersion v = IndexVersion.current(); List sources = Collections.singletonList(new BytesArray("{}")); MemoryIndex memoryIndex = new MemoryIndex(); @@ -1101,7 +1101,7 @@ public void 
testMsmAndRanges_disjunction() throws Exception { IndexSearcher shardSearcher = newSearcher(directoryReader); shardSearcher.setQueryCache(null); - IndexVersion v = IndexVersion.CURRENT; + IndexVersion v = IndexVersion.current(); List sources = Collections.singletonList(new BytesArray("{}")); Document document = new Document(); @@ -1125,7 +1125,7 @@ private void duelRun(PercolateQuery.QueryStore percolateQueryStore, MemoryIndex Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, - IndexVersion.CURRENT + IndexVersion.current() ); Query query = requireScore ? percolateQuery : new ConstantScoreQuery(percolateQuery); TopDocs topDocs = shardSearcher.search(query, 100); @@ -1211,7 +1211,7 @@ private TopDocs executeQuery(PercolateQuery.QueryStore percolateQueryStore, Memo Collections.singletonList(new BytesArray("{}")), percolateSearcher, false, - IndexVersion.CURRENT + IndexVersion.current() ); return shardSearcher.search(percolateQuery, 10); } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java index e0424af9a4db..b65d966bd655 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java @@ -75,7 +75,7 @@ public void testHitsExecute() throws Exception { when(sc.query()).thenReturn(percolateQuery); SearchExecutionContext sec = mock(SearchExecutionContext.class); when(sc.getSearchExecutionContext()).thenReturn(sec); - when(sec.indexVersionCreated()).thenReturn(IndexVersion.CURRENT); + when(sec.indexVersionCreated()).thenReturn(IndexVersion.current()); FetchSubPhaseProcessor processor = phase.getProcessor(sc); assertNotNull(processor); @@ -106,7 +106,7 @@ public void testHitsExecute() throws Exception { 
when(sc.query()).thenReturn(percolateQuery); SearchExecutionContext sec = mock(SearchExecutionContext.class); when(sc.getSearchExecutionContext()).thenReturn(sec); - when(sec.indexVersionCreated()).thenReturn(IndexVersion.CURRENT); + when(sec.indexVersionCreated()).thenReturn(IndexVersion.current()); FetchSubPhaseProcessor processor = phase.getProcessor(sc); assertNotNull(processor); @@ -136,7 +136,7 @@ public void testHitsExecute() throws Exception { when(sc.query()).thenReturn(percolateQuery); SearchExecutionContext sec = mock(SearchExecutionContext.class); when(sc.getSearchExecutionContext()).thenReturn(sec); - when(sec.indexVersionCreated()).thenReturn(IndexVersion.CURRENT); + when(sec.indexVersionCreated()).thenReturn(IndexVersion.current()); FetchSubPhaseProcessor processor = phase.getProcessor(sc); assertNotNull(processor); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java index e9d8e2a78b3a..6d6353b71706 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java @@ -70,7 +70,7 @@ public void testStoringQueryBuilders() throws IOException { queryBuilders[i] = new TermQueryBuilder(randomAlphaOfLength(4), randomAlphaOfLength(8)); DocumentParserContext documentParserContext = new TestDocumentParserContext(); PercolatorFieldMapper.createQueryBuilderField( - IndexVersion.CURRENT, + IndexVersion.current(), TransportVersion.current(), fieldMapper, queryBuilders[i], @@ -81,7 +81,7 @@ public void testStoringQueryBuilders() throws IOException { } SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class); - when(searchExecutionContext.indexVersionCreated()).thenReturn(IndexVersion.CURRENT); + 
when(searchExecutionContext.indexVersionCreated()).thenReturn(IndexVersion.current()); when(searchExecutionContext.getWriteableRegistry()).thenReturn(writableRegistry()); when(searchExecutionContext.getParserConfig()).thenReturn(parserConfig()); when(searchExecutionContext.getForField(fieldMapper.fieldType(), fielddataOperation)).thenReturn( diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java index 30c416ddadfe..fdf672f3cfbf 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.ReindexAction; import org.elasticsearch.index.reindex.UpdateByQueryAction; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -100,7 +101,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { return Collections.singletonList(new ReindexSslConfig(environment.settings(), environment, resourceWatcherService)); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java index ce20cc531998..052749d6f666 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java @@ -12,14 +12,17 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import 
org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.tasks.TaskId; import java.util.List; import java.util.function.Supplier; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.Scope.INTERNAL; import static org.elasticsearch.rest.action.admin.cluster.RestListTasksAction.listTasksResponseListener; +@ServerlessScope(INTERNAL) public class RestRethrottleAction extends BaseRestHandler { private final Supplier nodesInCluster; diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java index 24d1e032501c..72546d98d1f1 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.index.reindex.ReindexAction; import org.elasticsearch.index.reindex.ReindexRequestBuilder; import org.elasticsearch.index.reindex.RemoteInfo; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -176,7 +177,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { testFilter.set(new ReindexFromRemoteWithAuthTests.TestFilter(threadPool)); return Collections.emptyList(); diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index 
fbfe486de6bc..dd70e4778dd7 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -22,6 +22,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ReloadablePlugin; @@ -96,7 +97,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { AzureClientProvider azureClientProvider = AzureClientProvider.create(threadPool, settings); azureStoreService.set(createAzureStorageService(settings, azureClientProvider)); diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 08fb3de0f058..3b5f1928871f 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -72,8 +72,8 @@ public void testReadSecuredSettings() { private AzureRepositoryPlugin pluginWithSettingsValidation(Settings settings) { final AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings); - new SettingsModule(settings, plugin.getSettings(), Collections.emptyList(), Collections.emptySet()); - plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null); + new 
SettingsModule(settings, plugin.getSettings(), Collections.emptyList()); + plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null, null); return plugin; } diff --git a/modules/repository-gcs/build.gradle b/modules/repository-gcs/build.gradle index 86af9ac602d5..32236ab0fd8a 100644 --- a/modules/repository-gcs/build.gradle +++ b/modules/repository-gcs/build.gradle @@ -25,7 +25,7 @@ dependencies { api 'com.google.cloud:google-cloud-storage:2.13.1' api 'com.google.cloud:google-cloud-core:2.8.28' api 'com.google.cloud:google-cloud-core-http:2.8.28' - runtimeOnly 'com.google.guava:guava:31.1-jre' + runtimeOnly 'com.google.guava:guava:32.0.1-jre' runtimeOnly 'com.google.guava:failureaccess:1.0.1' api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" @@ -95,7 +95,6 @@ tasks.named("thirdPartyAudit").configure { 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 08b91f304a70..2b6193b52cc0 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -170,7 +170,7 @@ public void 
testEnforcedCooldownPeriod() throws IOException { fakeOldSnapshot, new RepositoryData.SnapshotDetails( SnapshotState.SUCCESS, - SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.minimumCompatibilityVersion(), + SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.minimumCompatibilityVersion().indexVersion, 0L, // -1 would refresh RepositoryData and find the real version 0L, // -1 would refresh RepositoryData and find the real version, "" // null would refresh RepositoryData and find the real version diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 3d4d426944db..b9644a54bffa 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ReloadablePlugin; @@ -102,7 +103,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { service.set(s3Service(environment)); this.service.get().refreshAndClearCache(S3ClientSettings.load(settings)); diff --git a/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java b/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java index ace6555860f7..9f157e2d291c 100644 --- 
a/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java @@ -20,6 +20,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; @@ -88,7 +89,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { final URLHttpClient.Factory apacheURLHttpClientFactory = new URLHttpClient.Factory(); diff --git a/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java b/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java index 84f21129e02f..b54b7ac48948 100644 --- a/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java +++ b/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java @@ -113,7 +113,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws .field("lucene_version", version.luceneVersion().toString()) .field("minimum_wire_compatibility_version", version.minimumCompatibilityVersion().toString()) .field("minimum_index_compatibility_version", version.minimumIndexCompatibilityVersion().toString()) - .field("transport_version", transportVersion.toString()) + .field("transport_version", transportVersion != null ? 
transportVersion.toString() : "unknown") .endObject(); builder.field("tagline", "You Know, for Search"); builder.endObject(); diff --git a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java index bc0b39e98999..76cf25266379 100644 --- a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java +++ b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java @@ -18,6 +18,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; @@ -76,7 +77,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { grokHelper.finishInitializing(threadPool); return List.of(); diff --git a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java index 27bd1791dc4d..2528383ec133 100644 --- a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java +++ b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java @@ -20,6 +20,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.Plugin; import 
org.elasticsearch.repositories.RepositoriesService; @@ -91,7 +92,8 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { if (enabled == false) { extender.set(null); diff --git a/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java b/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java index cb95f7d7e848..d93c6eff3f06 100644 --- a/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java +++ b/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java @@ -53,28 +53,28 @@ public class SystemdPluginTests extends ESTestCase { public void testIsEnabled() { final SystemdPlugin plugin = new SystemdPlugin(false, randomPackageBuildType, Boolean.TRUE.toString()); - plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null); + plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null, null); assertTrue(plugin.isEnabled()); assertNotNull(plugin.extender()); } public void testIsNotPackageDistribution() { final SystemdPlugin plugin = new SystemdPlugin(false, randomNonPackageBuildType, Boolean.TRUE.toString()); - plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null); + plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null, null); assertFalse(plugin.isEnabled()); assertNull(plugin.extender()); } public void testIsImplicitlyNotEnabled() { final SystemdPlugin plugin = new SystemdPlugin(false, randomPackageBuildType, null); - plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null); + 
plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null, null); assertFalse(plugin.isEnabled()); assertNull(plugin.extender()); } public void testIsExplicitlyNotEnabled() { final SystemdPlugin plugin = new SystemdPlugin(false, randomPackageBuildType, Boolean.FALSE.toString()); - plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null); + plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null, null); assertFalse(plugin.isEnabled()); assertNull(plugin.extender()); } @@ -162,7 +162,7 @@ int sd_notify(final int unset_environment, final String state) { } }; - plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null); + plugin.createComponents(null, null, threadPool, null, null, null, null, null, null, null, null, Tracer.NOOP, null, null); if (Boolean.TRUE.toString().equals(esSDNotify)) { assertNotNull(plugin.extender()); } else { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index eec5bc43500a..028de6c98d43 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -10,10 +10,10 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import 
org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; @@ -101,8 +101,8 @@ public void testConnectException() throws UnknownHostException { public void testDefaultKeepAliveSettings() throws IOException { assumeTrue("setting default keepalive options not supported on this platform", (IOUtils.LINUX || IOUtils.MAC_OS_X)); try ( - MockTransportService serviceC = buildService("TS_C", Version.CURRENT, TransportVersion.current(), Settings.EMPTY); - MockTransportService serviceD = buildService("TS_D", Version.CURRENT, TransportVersion.current(), Settings.EMPTY) + MockTransportService serviceC = buildService("TS_C", VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY); + MockTransportService serviceD = buildService("TS_D", VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY) ) { try (Transport.Connection connection = openConnection(serviceC, serviceD.getLocalDiscoNode(), TestProfiles.LIGHT_PROFILE)) { @@ -145,8 +145,8 @@ public void testTransportProfile() { ); try ( - MockTransportService serviceC = buildService("TS_C", Version.CURRENT, TransportVersion.current(), Settings.EMPTY); - MockTransportService serviceD = buildService("TS_D", Version.CURRENT, TransportVersion.current(), Settings.EMPTY) + MockTransportService serviceC = buildService("TS_C", VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY); + MockTransportService serviceD = buildService("TS_D", VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY) ) { try (Transport.Connection connection = openConnection(serviceC, serviceD.getLocalDiscoNode(), connectionProfile)) { @@ -223,7 +223,7 @@ public void testTimeoutPerConnection() throws IOException { try ( TransportService service = buildService( "TS_TPC", - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), null, Settings.EMPTY, diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 
75a8095ef412..530070f9e007 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -25,7 +25,7 @@ dependencies { api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" - runtimeOnly 'com.google.guava:guava:31.0.1-jre' + runtimeOnly 'com.google.guava:guava:32.0.1-jre' runtimeOnly 'com.google.guava:failureaccess:1.0.1' api 'io.opencensus:opencensus-api:0.30.0' api 'io.opencensus:opencensus-contrib-http-util:0.30.0' @@ -119,7 +119,6 @@ tasks.named("thirdPartyAudit").configure { 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', 'com.google.common.hash.Striped64', 'com.google.common.hash.Striped64$1', 'com.google.common.hash.Striped64$Cell', diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java index 01d924e31fd0..0ce0952b6a7f 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceDiscoveryTests.java @@ -9,9 +9,9 @@ package org.elasticsearch.discovery.gce; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.cloud.gce.GceInstancesServiceImpl; import org.elasticsearch.cloud.gce.GceMetadataService; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -88,7 +88,7 @@ public void setProjectName() { public void createTransportService() { transportService = 
MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java index efb06e5dd751..cadd341c8b1a 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldTypeTests.java @@ -29,7 +29,7 @@ public void testIntervals() throws IOException { } public void testFetchSourceValue() throws IOException { - MappedFieldType fieldType = new AnnotatedTextFieldMapper.Builder("field", IndexVersion.CURRENT, createDefaultIndexAnalyzers()) + MappedFieldType fieldType = new AnnotatedTextFieldMapper.Builder("field", IndexVersion.current(), createDefaultIndexAnalyzers()) .build(MapperBuilderContext.root(false)) .fieldType(); diff --git a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index 66fbe9ced81f..2aecc61d1017 100644 --- a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -14,7 +14,6 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import 
org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -37,6 +36,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -92,7 +92,7 @@ public void tearDown() throws Exception { private static MockTransportService startTransport( final String id, final List knownNodes, - final Version version, + final VersionInformation version, final TransportVersion transportVersion, final ThreadPool threadPool ) { @@ -160,7 +160,7 @@ public void testSearchSkipUnavailable() throws IOException { MockTransportService remoteTransport = startTransport( "node0", new CopyOnWriteArrayList<>(), - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -266,7 +266,7 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { MockTransportService remoteTransport = startTransport( "node0", new CopyOnWriteArrayList<>(), - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 6736e8d8c1fc..52e4bdf0e6fc 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -41,6 +41,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; +import 
org.hamcrest.Matchers; import org.junit.Before; import org.junit.ClassRule; import org.junit.rules.RuleChain; @@ -74,6 +75,7 @@ import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_COMPRESS; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -959,7 +961,7 @@ public void testRecovery() throws Exception { assertTrue("expected to find a primary but didn't\n" + recoveryResponse, foundPrimary); assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog); - String currentLuceneVersion = IndexVersion.CURRENT.luceneVersion().toString(); + String currentLuceneVersion = IndexVersion.current().luceneVersion().toString(); String bwcLuceneVersion = getOldClusterVersion().luceneVersion().toString(); String minCompatibleBWCVersion = Version.CURRENT.minimumCompatibilityVersion().luceneVersion().toString(); if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) { @@ -1303,7 +1305,11 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", snapResponse)); assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", snapResponse)); - assertEquals(singletonList(tookOnVersion.toString()), XContentMapValues.extractValue("snapshots.version", snapResponse)); + // the format can change depending on the ES node version running & this test code running + assertThat( + XContentMapValues.extractValue("snapshots.version", snapResponse), + either(Matchers.equalTo(List.of(tookOnVersion.toString()))).or(equalTo(List.of(tookOnVersion.indexVersion.toString()))) + ); // 
Remove the routing setting and template so we can test restoring them. Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java index 40d3c50e9a44..74ba81e9555e 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java @@ -17,6 +17,7 @@ import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -92,8 +93,8 @@ public void testGetFeatureUpgradeStatus() throws Exception { .findFirst() .orElse(Collections.emptyMap()); - assertThat(feature.size(), equalTo(4)); - assertThat(feature.get("minimum_index_version"), equalTo(UPGRADE_FROM_VERSION.toString())); + assertThat(feature, aMapWithSize(4)); + assertThat(feature.get("minimum_index_version"), equalTo(Integer.toString(UPGRADE_FROM_VERSION.id))); if (UPGRADE_FROM_VERSION.before(TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION)) { assertThat(feature.get("migration_status"), equalTo("MIGRATION_NEEDED")); } else { diff --git a/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java b/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java index 32e5abfb9a3e..48f987d0359f 100644 --- a/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java +++ b/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java @@ -8,12 +8,12 @@ package org.elasticsearch.system.indices; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import 
org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.After; @@ -61,7 +61,7 @@ public void testGetFeatureUpgradedStatuses() throws Exception { .orElse(Collections.emptyMap()); assertThat(testFeature.size(), equalTo(4)); - assertThat(testFeature.get("minimum_index_version"), equalTo(Version.CURRENT.toString())); + assertThat(testFeature.get("minimum_index_version"), equalTo(IndexVersion.current().toString())); assertThat(testFeature.get("migration_status"), equalTo("NO_MIGRATION_NEEDED")); assertThat(testFeature.get("indices"), instanceOf(List.class)); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_lifecycle.json index 96aa5d21e62c..bf153ff72e35 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_lifecycle.json @@ -1,7 +1,7 @@ { "indices.delete_data_lifecycle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-delete-lifecycle.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-delete-lifecycle.html", "description":"Deletes the data lifecycle of the selected data streams." 
}, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.explain_data_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.explain_data_lifecycle.json index 51ee3b554f1b..3232407000b1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.explain_data_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.explain_data_lifecycle.json @@ -1,8 +1,8 @@ { "indices.explain_data_lifecycle": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/dlm-explain-lifecycle.html", - "description": "Retrieves information about the index's current DLM lifecycle, such as any potential encountered error, time since creation etc." + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams-explain-lifecycle.html", + "description": "Retrieves information about the index's current data stream lifecycle, such as any potential encountered error, time since creation etc." }, "stability": "experimental", "visibility": "public", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json index 7cbfbfb9e500..2cb934c84bcb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_lifecycle.json @@ -1,7 +1,7 @@ { "indices.get_data_lifecycle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-get-lifecycle.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-get-lifecycle.html", "description":"Returns the data lifecycle of the selected data streams." 
}, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_data_lifecycle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_data_lifecycle.json index 63cf5addd3bb..b2f19bdc3fc1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_data_lifecycle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_data_lifecycle.json @@ -1,7 +1,7 @@ { "indices.put_data_lifecycle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/dlm-put-lifecycle.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams-put-lifecycle.html", "description":"Updates the data lifecycle of the selected data streams." }, "stability":"experimental", diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index f34aef9b8332..c67ae7c0bfd5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -94,8 +94,9 @@ setup: --- "kNN search plus query": - skip: - version: ' - 8.3.99' - reason: 'kNN added to search endpoint in 8.4' + version: all #' - 8.3.99' + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/97144" + # reason: 'kNN added to search endpoint in 8.4' - do: search: index: test @@ -121,8 +122,9 @@ setup: --- "kNN multi-field search with query": - skip: - version: ' - 8.6.99' - reason: 'multi-field kNN search added to search endpoint in 8.7' + version: all #' - 8.6.99' + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/97144" + # reason: 'multi-field kNN search added to search endpoint in 8.7' - do: search: index: test diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml index 873b6d87cac6..4d003a5c3b7b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml @@ -66,6 +66,9 @@ setup: --- "kNN search plus query": + - skip: + version: all + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/97144" - do: search: index: test diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/120_counter_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/120_counter_fields.yml index d78f6c5c7e3a..f993b18ddd8a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/120_counter_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/120_counter_fields.yml @@ -1,8 +1,8 @@ --- "avg aggregation on counter field": - skip: - version: " - 8.6.99" - reason: "counter field support added in 8.7" + version: " - 8.9.99" + reason: "counter field support added in 8.7, but exception message changed in 8.10.0" - do: indices.create: @@ -43,7 +43,7 @@ - match: { aggregations.the_counter_avg.value: null } - do: - catch: /Field \[counter_field\] of type \[long\] is not supported for aggregation \[avg\]/ + catch: /Field \[counter_field\] of type \[long\]\[counter\] is not supported for aggregation \[avg\]/ search: index: myindex2 body: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/130_position_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/130_position_fields.yml index 1662c4a591eb..89e8ebb3e24e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/130_position_fields.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/130_position_fields.yml @@ -73,10 +73,10 @@ multi-valued fields unsupported: --- "avg aggregation on position field unsupported": - skip: - version: " - 8.7.99" - reason: position metric introduced in 8.8.0 + version: " - 8.9.99" + reason: position metric introduced in 8.8.0, but exception message changed in 8.10.0 - do: - catch: /Field \[location\] of type \[geo_point\] is not supported for aggregation \[avg\]/ + catch: /Field \[location\] of type \[geo_point\]\[position\] is not supported for aggregation \[avg\]/ search: index: locations body: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index e8c1dd85c6a1..bf41c095df96 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -110,7 +110,7 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha useRoutingPartition = randomBoolean(); } if (useRouting && useMixedRouting == false && useRoutingPartition) { - int numRoutingShards = MetadataCreateIndexService.calculateNumRoutingShards(secondSplitShards, IndexVersion.CURRENT) - 1; + int numRoutingShards = MetadataCreateIndexService.calculateNumRoutingShards(secondSplitShards, IndexVersion.current()) - 1; settings.put("index.routing_partition_size", randomIntBetween(1, numRoutingShards)); if (useNested) { createInitialIndex.setMapping("_routing", "required=true", "nested1", "type=nested"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index a7460ef0e8ab..1a4c90830664 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -415,7 +416,8 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { clusterService.addListener(event -> { final ClusterState state = event.state(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java index 4c68bb579724..9660be019383 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; @@ -77,7 +78,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService 
indicesService ) { clusterService.getClusterSettings().addSettingsUpdateConsumer(UPDATE_TEMPLATE_DUMMY_SETTING, integer -> { logger.debug("the template dummy setting was updated to {}", integer); @@ -95,7 +97,8 @@ public Collection createComponents( expressionResolver, repositoriesServiceSupplier, tracer, - allocationService + allocationService, + indicesService ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsUpdateWithFaultyMasterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsUpdateWithFaultyMasterIT.java new file mode 100644 index 000000000000..daf94b6739f6 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsUpdateWithFaultyMasterIT.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.settings; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.NetworkDisruption; +import org.elasticsearch.test.transport.MockTransportService; + +import java.util.Collection; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.not; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) +public class ClusterSettingsUpdateWithFaultyMasterIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(BlockingClusterSettingTestPlugin.class, MockTransportService.TestPlugin.class); + } + + public void testClusterSettingsUpdateNotAcknowledged() throws Exception { + final var nodes = internalCluster().startMasterOnlyNodes(3); + final String masterNode = internalCluster().getMasterName(); + final String blockedNode = randomValueOtherThan(masterNode, () -> randomFrom(nodes)); + assertThat(blockedNode, not(equalTo(internalCluster().getMasterName()))); + ensureStableCluster(3); + + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions( + Set.of(blockedNode), + nodes.stream().filter(n -> n.equals(blockedNode) == false).collect(Collectors.toSet()) + ), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + + logger.debug("--> updating cluster settings"); + var future = client(masterNode).admin() + .cluster() + .prepareUpdateSettings() + 
.setPersistentSettings(Settings.builder().put(BlockingClusterSettingTestPlugin.TEST_BLOCKING_SETTING.getKey(), true).build()) + .setMasterNodeTimeout(TimeValue.timeValueMillis(10L)) + .execute(); + + logger.debug("--> waiting for cluster state update to be blocked"); + BlockingClusterSettingTestPlugin.blockLatch.await(); + + logger.debug("--> isolating master eligible node [{}] from other nodes", blockedNode); + networkDisruption.startDisrupting(); + + logger.debug("--> unblocking cluster state update"); + BlockingClusterSettingTestPlugin.releaseLatch.countDown(); + + assertThat("--> cluster settings update should not be acknowledged", future.get().isAcknowledged(), equalTo(false)); + + logger.debug("--> stop network disruption"); + networkDisruption.stopDisrupting(); + ensureStableCluster(3); + } + + public static class BlockingClusterSettingTestPlugin extends Plugin { + + private static final Logger logger = LogManager.getLogger(BlockingClusterSettingTestPlugin.class); + + private static final CountDownLatch blockLatch = new CountDownLatch(1); + private static final CountDownLatch releaseLatch = new CountDownLatch(1); + private static final AtomicBoolean blockOnce = new AtomicBoolean(); + + public static final Setting TEST_BLOCKING_SETTING = Setting.boolSetting("cluster.test.blocking_setting", false, value -> { + if (blockOnce.compareAndSet(false, true)) { + logger.debug("--> setting validation is now blocking cluster state update"); + blockLatch.countDown(); + try { + logger.debug("--> setting validation is now waiting for release"); + releaseLatch.await(); + logger.debug("--> setting validation is done"); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new AssertionError(e); + } + } + }, Setting.Property.NodeScope, Setting.Property.Dynamic); + + @Override + public List> getSettings() { + return List.of(TEST_BLOCKING_SETTING); + } + } +} diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 08d69e65acb7..d0311740fc63 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.shards; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -20,6 +19,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.ShardLimitValidator; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotState; @@ -294,7 +294,7 @@ public void testRestoreSnapshotOverLimit() { assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); + assertThat(snapshotInfo.version(), equalTo(IndexVersion.current())); // Test restore after index deletion logger.info("--> delete indices"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java deleted file mode 100644 index 55983e0959c7..000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/common/settings/UpgradeSettingsIT.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.common.settings; - -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.junit.After; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.function.BiConsumer; -import java.util.function.Function; - -import static org.hamcrest.Matchers.equalTo; - -public class UpgradeSettingsIT extends ESSingleNodeTestCase { - - @After - public void cleanup() throws Exception { - clusterAdmin().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().putNull("*")) - .setTransientSettings(Settings.builder().putNull("*")) - .get(); - } - - @Override - protected Collection> getPlugins() { - return Collections.singletonList(UpgradeSettingsPlugin.class); - } - - public static class UpgradeSettingsPlugin extends Plugin { - - static final Setting oldSetting = Setting.simpleString("foo.old", Setting.Property.Dynamic, Setting.Property.NodeScope); - static final Setting newSetting = Setting.simpleString("foo.new", Setting.Property.Dynamic, Setting.Property.NodeScope); - - public UpgradeSettingsPlugin() { - - } - - @Override - public List> getSettings() { - return Arrays.asList(oldSetting, newSetting); - } - - @Override - public List> getSettingUpgraders() { - return Collections.singletonList(new SettingUpgrader() { - - @Override - public Setting getSetting() { - return oldSetting; - } - - 
@Override - public String getKey(final String key) { - return "foo.new"; - } - - @Override - public String getValue(final String value) { - return "new." + value; - } - }); - } - } - - public void testUpgradePersistentSettingsOnUpdate() { - runUpgradeSettingsOnUpdateTest((settings, builder) -> builder.setPersistentSettings(settings), Metadata::persistentSettings); - } - - public void testUpgradeTransientSettingsOnUpdate() { - runUpgradeSettingsOnUpdateTest((settings, builder) -> builder.setTransientSettings(settings), Metadata::transientSettings); - } - - private void runUpgradeSettingsOnUpdateTest( - final BiConsumer consumer, - final Function settingsFunction - ) { - final String value = randomAlphaOfLength(8); - final ClusterUpdateSettingsRequestBuilder builder = clusterAdmin().prepareUpdateSettings(); - consumer.accept(Settings.builder().put("foo.old", value).build(), builder); - builder.get(); - - final ClusterStateResponse response = clusterAdmin().prepareState().clear().setMetadata(true).get(); - - assertFalse(UpgradeSettingsPlugin.oldSetting.exists(settingsFunction.apply(response.getState().metadata()))); - assertTrue(UpgradeSettingsPlugin.newSetting.exists(settingsFunction.apply(response.getState().metadata()))); - assertThat(UpgradeSettingsPlugin.newSetting.get(settingsFunction.apply(response.getState().metadata())), equalTo("new." 
+ value)); - } - -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java index 8d4789da7d1d..ba59d74768fa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.health.node.HealthInfo; import org.elasticsearch.health.stats.HealthApiStatsAction; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.HealthPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -107,7 +108,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { healthIndicatorServices.add(new IlmHealthIndicatorService(clusterService)); healthIndicatorServices.add(new SlmHealthIndicatorService(clusterService)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java index e987a6dafe6b..53b568f0a25e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.health.node.DiskHealthInfo; import org.elasticsearch.health.node.FetchHealthInfoCacheAction; import org.elasticsearch.health.node.HealthInfo; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.HealthPlugin; import org.elasticsearch.plugins.Plugin; import 
org.elasticsearch.repositories.RepositoriesService; @@ -136,7 +137,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { healthIndicatorServices.add(new TestHealthIndicatorService()); return new ArrayList<>(healthIndicatorServices); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java index da4b8a21a340..bab4b2c08882 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; @@ -405,7 +406,8 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { return List.of(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java index d28e8f0bfacf..d1095354e112 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; @@ -75,7 +76,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { return Collections.singletonList(service); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index beafe14079b4..8731c319043a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -88,9 +88,12 @@ public void onAllNodesStopped() throws Exception { final UnassignedInfo unassignedInfo = allocationExplainResponse.getExplanation().getUnassignedInfo(); assertThat(description, unassignedInfo, not(nullValue())); assertThat(description, unassignedInfo.getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); - final Throwable cause = ExceptionsHelper.unwrap(unassignedInfo.getFailure(), TranslogCorruptedException.class); - assertThat(description, cause, not(nullValue())); - assertThat(description, cause.getMessage(), containsString(translogPath.toString())); + var failure = unassignedInfo.getFailure(); + assertNotNull(failure); + final Throwable cause = ExceptionsHelper.unwrap(failure, TranslogCorruptedException.class); + if (cause != null) { + assertThat(description, cause.getMessage(), containsString(translogPath.toString())); + } }); assertThat( diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index ae0bcfff992f..393c9db8c79e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -679,7 +678,7 @@ public void testSnapshotRecovery() throws Exception { SnapshotRecoverySource recoverySource = new SnapshotRecoverySource( ((SnapshotRecoverySource) recoveryState.getRecoverySource()).restoreUUID(), new Snapshot(REPO_NAME, createSnapshotResponse.getSnapshotInfo().snapshotId()), - Version.CURRENT, + IndexVersion.current(), repositoryData.resolveIndexId(INDEX_NAME) ); assertRecoveryState(recoveryState, 0, recoverySource, true, Stage.DONE, null, nodeA); @@ -1683,7 +1682,7 @@ private long getLocalCheckpointOfSafeCommit(IndexCommit safeIndexCommit) throws final Query query = new BooleanQuery.Builder().add( LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, commitLocalCheckpoint + 1, Long.MAX_VALUE), BooleanClause.Occur.MUST - ).add(Queries.newNonNestedFilter(IndexVersion.CURRENT), BooleanClause.Occur.MUST).build(); + ).add(Queries.newNonNestedFilter(IndexVersion.current()), BooleanClause.Occur.MUST).build(); final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1.0f); for (LeafReaderContext leaf : directoryReader.leaves()) { final Scorer scorer = weight.scorer(leaf); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java index f95c4cc3ee62..77dec0b574c0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java @@ -193,7 +193,7 @@ public void testOnlyFetchesSnapshotFromEnabledRepositories() throws Exception { assertThat(commitVersion, is(equalTo(Version.CURRENT))); final org.apache.lucene.util.Version commitLuceneVersion = shardSnapshotData.getCommitLuceneVersion(); assertThat(commitLuceneVersion, is(notNullValue())); - assertThat(commitLuceneVersion, is(equalTo(IndexVersion.CURRENT.luceneVersion()))); + assertThat(commitLuceneVersion, is(equalTo(IndexVersion.current().luceneVersion()))); assertThat(shardSnapshotInfo.getShardId(), is(equalTo(shardId))); assertThat(shardSnapshotInfo.getSnapshot().getSnapshotId().getName(), is(equalTo(snapshotName))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java index 9bf33fb5ebb9..959611f9fd85 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -97,7 +98,8 @@ public Collection createComponents( 
IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { this.threadPool = threadPool; return List.of(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index ce98c1721c0c..8662e2748feb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Strings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.IndexMetaDataGenerations; import org.elasticsearch.repositories.Repository; @@ -340,7 +341,7 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { ); final RepositoryData finalRepositoryData = getRepositoryData(repoName); for (SnapshotId snapshotId : finalRepositoryData.getSnapshotIds()) { - assertThat(finalRepositoryData.getVersion(snapshotId), is(Version.CURRENT)); + assertThat(finalRepositoryData.getVersion(snapshotId), is(IndexVersion.current())); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index be1bdbfcdd9a..0a08c02116cd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.Level; import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; @@ -25,6 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.blobstore.FileRestoreContext; @@ -905,8 +905,8 @@ public void testFailOnAncientVersion() throws Exception { final String repoName = "test-repo"; final Path repoPath = randomRepoPath(); createRepository(repoName, FsRepository.TYPE, repoPath); - final Version oldVersion = Version.CURRENT.previousMajor().previousMajor(); - final String oldSnapshot = initWithSnapshotVersion(repoName, repoPath, oldVersion); + final IndexVersion oldVersion = IndexVersion.fromId(IndexVersion.MINIMUM_COMPATIBLE.id() - 1); + final String oldSnapshot = initWithSnapshotVersion(repoName, repoPath, oldVersion.toVersion()); final SnapshotRestoreException snapshotRestoreException = expectThrows( SnapshotRestoreException.class, () -> clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot).execute().actionGet() @@ -917,7 +917,7 @@ public void testFailOnAncientVersion() throws Exception { "the snapshot was created with Elasticsearch version [" + oldVersion + "] which is below the current versions minimum index compatibility version [" - + Version.CURRENT.minimumIndexCompatibilityVersion() + + IndexVersion.MINIMUM_COMPATIBLE + "]" ) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 6ac4ddba7241..97d0a8332cff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -11,7 +11,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; @@ -49,6 +48,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.shard.IndexShard; @@ -160,7 +160,7 @@ public void testBasicWorkFlow() throws Exception { assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); + assertThat(snapshotInfo.version(), equalTo(IndexVersion.current())); if (snapshotClosed) { assertAcked(indicesAdmin().prepareOpen(indicesToSnapshot).setWaitForActiveShards(ActiveShardCount.ALL).get()); @@ -2097,7 +2097,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); + assertThat(snapshotInfo.version(), equalTo(IndexVersion.current())); logger.info("--> 
deleting indices"); cluster().wipeIndices(normalIndex, hiddenIndex, dottedHiddenIndex); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index 87c17de72e53..4f3c61c0df45 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.snapshots; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; @@ -26,6 +25,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.threadpool.ThreadPool; @@ -87,7 +87,7 @@ public void testStatusApiConsistency() throws Exception { assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.version(), equalTo(Version.CURRENT)); + assertThat(snapshotInfo.version(), equalTo(IndexVersion.current())); final List snapshotStatus = clusterAdmin().prepareSnapshotStatus("test-repo") .setSnapshots("test-snap") diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 42125556f771..cfd1f571e967 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -371,6 +371,7 @@ opens org.elasticsearch.common.logging to org.apache.logging.log4j.core; exports org.elasticsearch.action.dlm; + exports 
org.elasticsearch.action.downsample; provides java.util.spi.CalendarDataProvider with org.elasticsearch.common.time.IsoCalendarDataProvider; provides org.elasticsearch.xcontent.ErrorOnUnknown with org.elasticsearch.common.xcontent.SuggestingErrorOnUnknown; diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java index 4b6c768c4efe..ab0cc91aa02c 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersion.java +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -25,38 +25,43 @@ import java.util.TreeMap; /** - * Represents the version of the wire protocol used to communicate between ES nodes. + * Represents the version of the wire protocol used to communicate between a pair of ES nodes. *

- * Prior to 8.8.0, the release {@link Version} was used everywhere. This class separates the wire protocol version - * from the release version. + * Prior to 8.8.0, the release {@link Version} was used everywhere. This class separates the wire protocol version from the release version. *

- * Each transport version constant has an id number, which for versions prior to 8.9.0 is the same as the release version - * for backwards compatibility. In 8.9.0 this is changed to an incrementing number, disconnected from the release version. + * Each transport version constant has an id number, which for versions prior to 8.9.0 is the same as the release version for backwards + * compatibility. In 8.9.0 this is changed to an incrementing number, disconnected from the release version. *

- * Each version constant has a unique id string. This is not actually used in the binary protocol, but is there to ensure - * each protocol version is only added to the source file once. This string needs to be unique (normally a UUID, - * but can be any other unique nonempty string). - * If two concurrent PRs add the same transport version, the different unique ids cause a git conflict, ensuring the second PR to be merged - * must be updated with the next free version first. Without the unique id string, git will happily merge the two versions together, - * resulting in the same transport version being used across multiple commits, - * causing problems when you try to upgrade between those two merged commits. + * Each version constant has a unique id string. This is not actually used in the binary protocol, but is there to ensure each protocol + * version is only added to the source file once. This string needs to be unique (normally a UUID, but can be any other unique nonempty + * string). If two concurrent PRs add the same transport version, the different unique ids cause a git conflict, ensuring that the second PR + * to be merged must be updated with the next free version first. Without the unique id string, git will happily merge the two versions + * together, resulting in the same transport version being used across multiple commits, causing problems when you try to upgrade between + * those two merged commits. *

Version compatibility

- * The earliest compatible version is hardcoded in the {@link #MINIMUM_COMPATIBLE} field. Previously, this was dynamically calculated - * from the major/minor versions of {@link Version}, but {@code TransportVersion} does not have separate major/minor version numbers. - * So the minimum compatible version is hard-coded as the transport version used by the highest minor release of the previous major version. - * {@link #MINIMUM_COMPATIBLE} should be updated appropriately whenever a major release happens. + * The earliest compatible version is hardcoded in the {@link #MINIMUM_COMPATIBLE} field. Previously, this was dynamically calculated from + * the major/minor versions of {@link Version}, but {@code TransportVersion} does not have separate major/minor version numbers. So the + * minimum compatible version is hard-coded as the transport version used by the highest minor release of the previous major version. {@link + * #MINIMUM_COMPATIBLE} should be updated appropriately whenever a major release happens. *

- * The earliest CCS compatible version is hardcoded at {@link #MINIMUM_CCS_VERSION}, as the transport version used by the - * previous minor release. This should be updated appropriately whenever a minor release happens. + * The earliest CCS compatible version is hardcoded at {@link #MINIMUM_CCS_VERSION}, as the transport version used by the previous minor + * release. This should be updated appropriately whenever a minor release happens. *

Adding a new version

- * A new transport version should be added every time a change is made to the serialization protocol of one or more classes. - * Each transport version should only be used in a single merged commit (apart from BwC versions copied from {@link Version}). + * A new transport version should be added every time a change is made to the serialization protocol of one or more classes. Each + * transport version should only be used in a single merged commit (apart from BwC versions copied from {@link Version}). *

- * To add a new transport version, add a new constant at the bottom of the list that is one greater than the current highest version, - * ensure it has a unique id, and update the {@link CurrentHolder#CURRENT} constant to point to the new version. + * To add a new transport version, add a new constant at the bottom of the list that is one greater than the current highest version, ensure + * it has a unique id, and update the {@link CurrentHolder#CURRENT} constant to point to the new version. *

Reverting a transport version

- * If you revert a commit with a transport version change, you must ensure there is a new transport version - * representing the reverted change. Do not let the transport version go backwards, it must always be incremented. + * If you revert a commit with a transport version change, you must ensure there is a new transport version representing + * the reverted change. Do not let the transport version go backwards, it must always be incremented. + *

Scope of usefulness of {@link TransportVersion}

+ * {@link TransportVersion} is a property of the transport connection between a pair of nodes, and should not be used as an indication of + * the version of any single node. The {@link TransportVersion} of a connection is negotiated between the nodes via some logic that is not + * totally trivial, and may change in future. Any other places that might make decisions based on this version effectively have to reproduce + * this negotiation logic, which would be fragile. If you need to make decisions based on the version of a single node, do so using a + * different version value. If you need to know whether the cluster as a whole speaks a new enough {@link TransportVersion} to understand a + * newly-added feature, use {@link org.elasticsearch.cluster.ClusterState#getMinTransportVersion}. */ public record TransportVersion(int id) implements Comparable { @@ -152,9 +157,13 @@ private static TransportVersion registerTransportVersion(int id, String uniqueId public static final TransportVersion V_8_500_022 = registerTransportVersion(8_500_022, "4993c724-7a81-4955-84e7-403484610091"); public static final TransportVersion V_8_500_023 = registerTransportVersion(8_500_023, "01b06435-5d73-42ff-a121-3b36b771375e"); public static final TransportVersion V_8_500_024 = registerTransportVersion(8_500_024, "db337007-f823-4dbd-968e-375383814c17"); + public static final TransportVersion V_8_500_025 = registerTransportVersion(8_500_025, "b2ab7b75-5ac2-4a3b-bbb6-8789ca66722d"); + public static final TransportVersion V_8_500_026 = registerTransportVersion(8_500_026, "965d294b-14aa-4abb-bcfc-34631187941d"); + public static final TransportVersion V_8_500_027 = registerTransportVersion(8_500_027, "B151D967-8E7C-401C-8275-0ABC06335F2D"); + public static final TransportVersion V_8_500_028 = registerTransportVersion(8_500_028, "a6592d08-15cb-4e1a-b9b4-b2ba24058444"); private static class CurrentHolder { - private static final TransportVersion CURRENT = findCurrent(V_8_500_024); + private static 
final TransportVersion CURRENT = findCurrent(V_8_500_028); // finds the pluggable current version, or uses the given fallback private static TransportVersion findCurrent(TransportVersion fallback) { diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 3c126e8604c9..a03914d3e509 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -112,6 +112,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_17_9 = new Version(7_17_09_99, IndexVersion.V_7_17_9); public static final Version V_7_17_10 = new Version(7_17_10_99, IndexVersion.V_7_17_10); public static final Version V_7_17_11 = new Version(7_17_11_99, IndexVersion.V_7_17_11); + public static final Version V_7_17_12 = new Version(7_17_12_99, IndexVersion.V_7_17_12); public static final Version V_8_0_0 = new Version(8_00_00_99, IndexVersion.V_8_0_0); public static final Version V_8_0_1 = new Version(8_00_01_99, IndexVersion.V_8_0_1); public static final Version V_8_1_0 = new Version(8_01_00_99, IndexVersion.V_8_1_0); @@ -142,6 +143,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_8_8_0 = new Version(8_08_00_99, IndexVersion.V_8_8_0); public static final Version V_8_8_1 = new Version(8_08_01_99, IndexVersion.V_8_8_1); public static final Version V_8_8_2 = new Version(8_08_02_99, IndexVersion.V_8_8_2); + public static final Version V_8_8_3 = new Version(8_08_03_99, IndexVersion.V_8_8_3); public static final Version V_8_9_0 = new Version(8_09_00_99, IndexVersion.V_8_9_0); public static final Version V_8_10_0 = new Version(8_10_00_99, IndexVersion.V_8_10_0); public static final Version CURRENT = V_8_10_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 7bc4f64c0b7f..e9f523fb3812 100644 
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -572,7 +572,7 @@ public ActionModule( actionPlugins.stream().flatMap(p -> p.indicesAliasesRequestValidators().stream()).toList() ); headersToCopy = headers; - restController = new RestController(restInterceptor, nodeClient, circuitBreakerService, usageService, tracer, serverlessEnabled); + restController = new RestController(restInterceptor, nodeClient, circuitBreakerService, usageService, tracer); reservedClusterStateService = new ReservedClusterStateService(clusterService, reservedStateHandlers); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java index 74ac542214a9..4c2b97af9bd2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java @@ -9,12 +9,12 @@ package org.elasticsearch.action.admin.cluster.migration; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -124,7 +124,7 @@ public static UpgradeStatus combine(UpgradeStatus... 
statuses) { */ public static class FeatureUpgradeStatus implements Writeable, ToXContentObject { private final String featureName; - private final Version minimumIndexVersion; + private final IndexVersion minimumIndexVersion; private final UpgradeStatus upgradeStatus; private final List indexInfos; @@ -136,7 +136,7 @@ public static class FeatureUpgradeStatus implements Writeable, ToXContentObject */ public FeatureUpgradeStatus( String featureName, - Version minimumIndexVersion, + IndexVersion minimumIndexVersion, UpgradeStatus upgradeStatus, List indexInfos ) { @@ -152,7 +152,7 @@ public FeatureUpgradeStatus( */ public FeatureUpgradeStatus(StreamInput in) throws IOException { this.featureName = in.readString(); - this.minimumIndexVersion = Version.readVersion(in); + this.minimumIndexVersion = IndexVersion.readVersion(in); this.upgradeStatus = in.readEnum(UpgradeStatus.class); this.indexInfos = in.readImmutableList(IndexInfo::new); } @@ -161,7 +161,7 @@ public String getFeatureName() { return this.featureName; } - public Version getMinimumIndexVersion() { + public IndexVersion getMinimumIndexVersion() { return this.minimumIndexVersion; } @@ -176,7 +176,7 @@ public List getIndexVersions() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(this.featureName); - Version.writeVersion(this.minimumIndexVersion, out); + IndexVersion.writeVersion(this.minimumIndexVersion, out); out.writeEnum(this.upgradeStatus); out.writeList(this.indexInfos); } @@ -240,16 +240,16 @@ public static class IndexInfo implements Writeable, ToXContentObject { ); private final String indexName; - private final Version version; + private final IndexVersion version; @Nullable private final Exception exception; // Present if this index failed /** * @param indexName Name of the index - * @param version Version of Elasticsearch that created the index + * @param version Index version * @param exception The exception that this index's migration failed with, if 
applicable */ - public IndexInfo(String indexName, Version version, Exception exception) { + public IndexInfo(String indexName, IndexVersion version, Exception exception) { this.indexName = indexName; this.version = version; this.exception = exception; @@ -261,7 +261,7 @@ public IndexInfo(String indexName, Version version, Exception exception) { */ public IndexInfo(StreamInput in) throws IOException { this.indexName = in.readString(); - this.version = Version.readVersion(in); + this.version = IndexVersion.readVersion(in); boolean hasException = in.readBoolean(); if (hasException) { this.exception = in.readException(); @@ -274,7 +274,7 @@ public String getIndexName() { return this.indexName; } - public Version getVersion() { + public IndexVersion getVersion() { return this.version; } @@ -285,7 +285,7 @@ public Exception getException() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(this.indexName); - Version.writeVersion(this.version, out); + IndexVersion.writeVersion(this.version, out); if (exception != null) { out.writeBoolean(true); out.writeException(this.exception); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index 432e9670f365..46a46f8d3f44 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemIndices; import 
org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksService; @@ -52,6 +53,7 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA * Once all feature migrations for 8.x -> 9.x have been tested, we can bump this to Version.V_8_0_0 */ public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_7_0_0; + public static final IndexVersion NO_UPGRADE_REQUIRED_INDEX_VERSION = IndexVersion.V_7_0_0; private final SystemIndices systemIndices; PersistentTasksService persistentTasksService; @@ -124,14 +126,14 @@ static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus getFeatureUpgradeSta List indexInfos = getIndexInfos(state, feature); - Version minimumVersion = indexInfos.stream() + IndexVersion minimumVersion = indexInfos.stream() .map(GetFeatureUpgradeStatusResponse.IndexInfo::getVersion) - .min(Version::compareTo) - .orElse(Version.CURRENT); + .min(IndexVersion::compareTo) + .orElse(IndexVersion.current()); GetFeatureUpgradeStatusResponse.UpgradeStatus initialStatus; if (featureName.equals(currentFeature)) { initialStatus = IN_PROGRESS; - } else if (minimumVersion.before(NO_UPGRADE_REQUIRED_VERSION)) { + } else if (minimumVersion.before(NO_UPGRADE_REQUIRED_INDEX_VERSION)) { initialStatus = MIGRATION_NEEDED; } else { initialStatus = NO_MIGRATION_NEEDED; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index f81d36e5febf..3963faff454e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; 
import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsUpdater; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import org.elasticsearch.tasks.Task; @@ -159,7 +160,7 @@ protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { @Override public void onAllNodesAcked() { - if (changed) { + if (reroute) { reroute(true); } else { super.onAllNodesAcked(); @@ -168,8 +169,8 @@ public void onAllNodesAcked() { @Override public void onAckFailure(Exception e) { - if (changed) { - reroute(true); + if (reroute) { + reroute(false); } else { super.onAckFailure(e); } @@ -177,7 +178,7 @@ public void onAckFailure(Exception e) { @Override public void onAckTimeout() { - if (changed) { + if (reroute) { reroute(false); } else { super.onAckTimeout(); @@ -185,17 +186,6 @@ public void onAckTimeout() { } private void reroute(final boolean updateSettingsAcked) { - // We're about to send a second update task, so we need to check if we're still the elected master - // For example the minimum_master_node could have been breached and we're no longer elected master, - // so we should *not* execute the reroute. - if (clusterService.state().nodes().isLocalNodeElectedMaster() == false) { - logger.debug("Skipping reroute after cluster update settings, because node is no longer master"); - listener.onResponse( - new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()) - ); - return; - } - // The reason the reroute needs to be sent as separate update task, is that all the *cluster* settings are encapsulated in // the components (e.g. 
FilterAllocationDecider), so the changes made by the first call aren't visible to the components // until the ClusterStateListener instances have been invoked, but are visible after the first update task has been @@ -238,40 +228,31 @@ public void onFailure(Exception e) { }); } - public static class ClusterUpdateSettingsTask extends AckedClusterStateUpdateTask { - protected volatile boolean changed = false; + private static class ClusterUpdateSettingsTask extends AckedClusterStateUpdateTask { + protected volatile boolean reroute = false; protected final SettingsUpdater updater; protected final ClusterUpdateSettingsRequest request; - private final ClusterSettings clusterSettings; - public ClusterUpdateSettingsTask( + ClusterUpdateSettingsTask( final ClusterSettings clusterSettings, Priority priority, ClusterUpdateSettingsRequest request, ActionListener listener ) { super(priority, request, listener); - this.clusterSettings = clusterSettings; this.updater = new SettingsUpdater(clusterSettings); this.request = request; } - /** - * Used by the reserved state handler {@link ReservedClusterSettingsAction} - */ - public ClusterUpdateSettingsTask(final ClusterSettings clusterSettings, ClusterUpdateSettingsRequest request) { - this(clusterSettings, Priority.IMMEDIATE, request, null); - } - @Override public ClusterState execute(final ClusterState currentState) { final ClusterState clusterState = updater.updateSettings( currentState, - clusterSettings.upgradeSettings(request.transientSettings()), - clusterSettings.upgradeSettings(request.persistentSettings()), + request.transientSettings(), + request.persistentSettings(), logger ); - changed = clusterState != currentState; + reroute = clusterState != currentState; return clusterState; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 
9206da3e14fa..f23ee6242b5c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; @@ -26,6 +27,7 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.node.NodeClosedException; @@ -136,6 +138,11 @@ public void onTimeout(TimeValue timeout) { } } + @SuppressForbidden(reason = "exposing ClusterState#transportVersions requires reading them") + private static Map getTransportVersions(ClusterState clusterState) { + return clusterState.transportVersions(); + } + private ClusterStateResponse buildResponse(final ClusterStateRequest request, final ClusterState currentState) { logger.trace("Serving cluster state request using version {}", currentState.version()); ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName()); @@ -144,7 +151,7 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi if (request.nodes()) { builder.nodes(currentState.nodes()); - builder.transportVersions(currentState.transportVersions()); + builder.transportVersions(getTransportVersions(currentState)); } if (request.routingTable()) { if (request.indices().length > 0) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java index 5d7189143267..e0c2e511c687 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -17,6 +16,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -42,9 +42,9 @@ public final class VersionStats implements ToXContentFragment, Writeable { private final Set versionStats; public static VersionStats of(Metadata metadata, List nodeResponses) { - final Map indexCounts = new HashMap<>(); - final Map primaryShardCounts = new HashMap<>(); - final Map primaryByteCounts = new HashMap<>(); + final Map indexCounts = new HashMap<>(); + final Map primaryShardCounts = new HashMap<>(); + final Map primaryByteCounts = new HashMap<>(); final Map> indexPrimaryShardStats = new HashMap<>(); // Build a map from index name to primary shard stats @@ -69,38 +69,20 @@ public static VersionStats of(Metadata metadata, List for (Map.Entry cursor : metadata.indices().entrySet()) { IndexMetadata indexMetadata = cursor.getValue(); // Increment version-specific index counts - indexCounts.compute(indexMetadata.getCreationVersion(), (v, i) -> { - if (i == null) { - return 1; - } else { - return i + 1; - } - }); + indexCounts.merge(indexMetadata.getCreationVersion(), 1, Integer::sum); // 
Increment version-specific primary shard counts - primaryShardCounts.compute(indexMetadata.getCreationVersion(), (v, i) -> { - if (i == null) { - return indexMetadata.getNumberOfShards(); - } else { - return i + indexMetadata.getNumberOfShards(); - } - }); + primaryShardCounts.merge(indexMetadata.getCreationVersion(), indexMetadata.getNumberOfShards(), Integer::sum); // Increment version-specific primary shard sizes - primaryByteCounts.compute(indexMetadata.getCreationVersion(), (v, i) -> { - String indexName = indexMetadata.getIndex().getName(); - long indexPrimarySize = indexPrimaryShardStats.getOrDefault(indexName, Collections.emptyList()) - .stream() - .mapToLong(stats -> stats.getStats().getStore().sizeInBytes()) - .sum(); - if (i == null) { - return indexPrimarySize; - } else { - return i + indexPrimarySize; - } - }); + String indexName = indexMetadata.getIndex().getName(); + long indexPrimarySize = indexPrimaryShardStats.getOrDefault(indexName, Collections.emptyList()) + .stream() + .mapToLong(stats -> stats.getStats().getStore().sizeInBytes()) + .sum(); + primaryByteCounts.merge(indexMetadata.getCreationVersion(), indexPrimarySize, Long::sum); } List calculatedStats = new ArrayList<>(indexCounts.size()); - for (Map.Entry indexVersionCount : indexCounts.entrySet()) { - Version v = indexVersionCount.getKey(); + for (Map.Entry indexVersionCount : indexCounts.entrySet()) { + IndexVersion v = indexVersionCount.getKey(); SingleVersionStats singleStats = new SingleVersionStats( v, indexVersionCount.getValue(), @@ -164,12 +146,12 @@ public String toString() { static class SingleVersionStats implements ToXContentObject, Writeable, Comparable { - public final Version version; + public final IndexVersion version; public final int indexCount; public final int primaryShardCount; public final long totalPrimaryByteCount; - SingleVersionStats(Version version, int indexCount, int primaryShardCount, long totalPrimaryByteCount) { + SingleVersionStats(IndexVersion version, int 
indexCount, int primaryShardCount, long totalPrimaryByteCount) { this.version = version; this.indexCount = indexCount; this.primaryShardCount = primaryShardCount; @@ -177,7 +159,7 @@ static class SingleVersionStats implements ToXContentObject, Writeable, Comparab } SingleVersionStats(StreamInput in) throws IOException { - this.version = Version.readVersion(in); + this.version = IndexVersion.readVersion(in); this.indexCount = in.readVInt(); this.primaryShardCount = in.readVInt(); this.totalPrimaryByteCount = in.readVLong(); @@ -196,7 +178,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - Version.writeVersion(this.version, out); + IndexVersion.writeVersion(this.version, out); out.writeVInt(this.indexCount); out.writeVInt(this.primaryShardCount); out.writeVLong(this.totalPrimaryByteCount); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleConfig.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleConfig.java similarity index 76% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleConfig.java rename to server/src/main/java/org/elasticsearch/action/downsample/DownsampleConfig.java index 049f6c543618..524133faf86f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleConfig.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleConfig.java @@ -1,10 +1,12 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
*/ -package org.elasticsearch.xpack.core.downsample; + +package org.elasticsearch.action.downsample; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.Strings; @@ -29,7 +31,7 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; /** - * This class holds the configuration details of a {@link DownsampleAction} that downsamples time series + * This class holds the configuration details of a DownsampleAction that downsamples time series * (TSDB) indices. We have made great effort to simplify the rollup configuration and currently * only requires a fixed time interval. So, it has the following format: * @@ -97,6 +99,37 @@ public DownsampleConfig(final StreamInput in) throws IOException { fixedInterval = new DateHistogramInterval(in); } + /** + * This method validates the target downsampling configuration can be applied on an index that has been + * already downsampled from the source configuration. The requirements are: + * - The target interval needs to be greater than source interval + * - The target interval needs to be a multiple of the source interval + * throws an IllegalArgumentException to signal that the target interval is not acceptable + */ + public static void validateSourceAndTargetIntervals(DownsampleConfig source, DownsampleConfig target) { + long sourceMillis = source.fixedInterval.estimateMillis(); + long targetMillis = target.fixedInterval.estimateMillis(); + if (sourceMillis >= targetMillis) { + // Downsampling interval must be greater than source interval + throw new IllegalArgumentException( + "Downsampling interval [" + + target.fixedInterval + + "] must be greater than the source index interval [" + + source.fixedInterval + + "]." 
+ ); + } else if (targetMillis % sourceMillis != 0) { + // Downsampling interval must be a multiple of the source interval + throw new IllegalArgumentException( + "Downsampling interval [" + + target.fixedInterval + + "] must be a multiple of the source index interval [" + + source.fixedInterval + + "]." + ); + } + } + @Override public void writeTo(final StreamOutput out) throws IOException { fixedInterval.writeTo(out); @@ -154,11 +187,15 @@ public String getWriteableName() { public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); { - builder.field(FIXED_INTERVAL, fixedInterval.toString()); + toXContentFragment(builder); } return builder.endObject(); } + public XContentBuilder toXContentFragment(final XContentBuilder builder) throws IOException { + return builder.field(FIXED_INTERVAL, fixedInterval.toString()); + } + public static DownsampleConfig fromXContent(final XContentParser parser) throws IOException { return PARSER.parse(parser, null); } diff --git a/server/src/main/java/org/elasticsearch/action/support/CancellableFanOut.java b/server/src/main/java/org/elasticsearch/action/support/CancellableFanOut.java index eb7a51542243..5bf0a8fc44c3 100644 --- a/server/src/main/java/org/elasticsearch/action/support/CancellableFanOut.java +++ b/server/src/main/java/org/elasticsearch/action/support/CancellableFanOut.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -94,7 +95,20 @@ public final void run(@Nullable Task task, Iterator itemsIterator, ActionL final ActionListener itemResponseListener = ActionListener.notifyOnce(new ActionListener<>() { @Override public void onResponse(ItemResponse itemResponse) { - onItemResponse(item, 
itemResponse); + try { + onItemResponse(item, itemResponse); + } catch (Exception e) { + logger.error( + () -> Strings.format( + "unexpected exception handling [%s] for item [%s] in [%s]", + itemResponse, + item, + CancellableFanOut.this + ), + e + ); + assert false : e; + } } @Override @@ -103,7 +117,7 @@ public void onFailure(Exception e) { // Completed on cancellation so it is released promptly, but there's no need to handle the exception. return; } - onItemFailure(item, e); + onItemFailure(item, e); // must not throw, enforced by the ActionListener#notifyOnce wrapper } @Override @@ -122,7 +136,7 @@ public String toString() { } // Process the item, capturing a ref to make sure the outer listener is completed after this item is processed. - sendItemRequest(item, ActionListener.releaseAfter(itemResponseListener, refs.acquire())); + ActionListener.run(ActionListener.releaseAfter(itemResponseListener, refs.acquire()), l -> sendItemRequest(item, l)); } } catch (Exception e) { // NB the listener may have been completed already (by exiting this try block) so this exception may not be sent to the caller, @@ -143,7 +157,8 @@ public String toString() { protected abstract void sendItemRequest(Item item, ActionListener listener); /** - * Handle a successful response for an item. May be called concurrently for multiple items. Not called if the task is cancelled. + * Handle a successful response for an item. May be called concurrently for multiple items. Not called if the task is cancelled. Must + * not throw any exceptions. *

* Note that it's easy to accidentally capture another reference to this class when implementing this method, and that will prevent the * early release of any accumulated results. Beware of lambdas, and test carefully. @@ -151,7 +166,8 @@ public String toString() { protected abstract void onItemResponse(Item item, ItemResponse itemResponse); /** - * Handle a failure for an item. May be called concurrently for multiple items. Not called if the task is cancelled. + * Handle a failure for an item. May be called concurrently for multiple items. Not called if the task is cancelled. Must not throw any + * exceptions. *

* Note that it's easy to accidentally capture another reference to this class when implementing this method, and that will prevent the * early release of any accumulated results. Beware of lambdas, and test carefully. diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index f660ee40f21e..17c2a68f2133 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -660,7 +660,7 @@ public interface ReplicaResponse { } public static class RetryOnPrimaryException extends ElasticsearchException { - RetryOnPrimaryException(ShardId shardId, String msg) { + public RetryOnPrimaryException(ShardId shardId, String msg) { this(shardId, msg, null); } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index caf2a50934ca..59d2a94a21c9 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -340,10 +340,10 @@ static void initializeProbes() { } static void checkLucene() { - if (IndexVersion.CURRENT.luceneVersion().equals(org.apache.lucene.util.Version.LATEST) == false) { + if (IndexVersion.current().luceneVersion().equals(org.apache.lucene.util.Version.LATEST) == false) { throw new AssertionError( "Lucene version mismatch this version of Elasticsearch requires lucene version [" - + IndexVersion.CURRENT.luceneVersion() + + IndexVersion.current().luceneVersion() + "] but the current lucene version is [" + org.apache.lucene.util.Version.LATEST + "]" diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 
c5a72f97ea94..f7ff77b635a1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -188,7 +188,7 @@ public ClusterState(long version, String stateUUID, ClusterState state) { state.metadata(), state.routingTable(), state.nodes(), - state.transportVersions(), + state.transportVersions, state.blocks(), state.customs(), false, @@ -736,7 +736,7 @@ public Builder(ClusterState state) { this.version = state.version(); this.uuid = state.stateUUID(); this.nodes = state.nodes(); - this.transportVersions = new HashMap<>(state.transportVersions()); + this.transportVersions = new HashMap<>(state.transportVersions); this.routingTable = state.routingTable(); this.metadata = state.metadata(); this.blocks = state.blocks(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index fbd271aea248..e9529f9cdca1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -24,6 +24,8 @@ import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.Priority; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import java.util.ArrayList; @@ -146,7 +148,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster - 
ensureIndexCompatibility(node.getVersion(), initialState.getMetadata()); + ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), initialState.getMetadata()); nodesBuilder.add(node); transportVersions.put(node.getId(), transportVersion); nodesChanged = true; @@ -237,6 +239,11 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } } + @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") + private static Map getTransportVersions(ClusterState clusterState) { + return clusterState.transportVersions(); + } + protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( ClusterState currentState, List> taskContexts, @@ -258,7 +265,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( assert currentState.term() < term : term + " vs " + currentState; DiscoveryNodes currentNodes = currentState.nodes(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); - Map transportVersions = new HashMap<>(currentState.transportVersions()); + Map transportVersions = new HashMap<>(getTransportVersions(currentState)); nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); for (final var taskContext : taskContexts) { @@ -316,35 +323,34 @@ private static void blockForbiddenVersions(TransportVersion joiningTransportVers } /** - * Ensures that all indices are compatible with the given node version. This will ensure that all indices in the given metadata + * Ensures that all indices are compatible with the given index version. This will ensure that all indices in the given metadata * will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index * compatibility version. 
- * @see Version#minimumIndexCompatibilityVersion() + * @see IndexVersion#MINIMUM_COMPATIBLE * @throws IllegalStateException if any index is incompatible with the given version */ - public static void ensureIndexCompatibility(final Version nodeVersion, Metadata metadata) { - Version supportedIndexVersion = nodeVersion.minimumIndexCompatibilityVersion(); + public static void ensureIndexCompatibility(IndexVersion minSupportedVersion, IndexVersion maxSupportedVersion, Metadata metadata) { // we ensure that all indices in the cluster we join are compatible with us no matter if they are // closed or not we can't read mappings of these indices so we need to reject the join... for (IndexMetadata idxMetadata : metadata) { - if (idxMetadata.getCompatibilityVersion().after(nodeVersion)) { + if (idxMetadata.getCompatibilityVersion().after(maxSupportedVersion)) { throw new IllegalStateException( "index " + idxMetadata.getIndex() + " version not supported: " + idxMetadata.getCompatibilityVersion() - + " the node version is: " - + nodeVersion + + " maximum compatible index version is: " + + maxSupportedVersion ); } - if (idxMetadata.getCompatibilityVersion().before(supportedIndexVersion)) { + if (idxMetadata.getCompatibilityVersion().before(minSupportedVersion)) { throw new IllegalStateException( "index " + idxMetadata.getIndex() + " version not supported: " + idxMetadata.getCompatibilityVersion() + " minimum compatible index version is: " - + supportedIndexVersion + + minSupportedVersion ); } } @@ -427,7 +433,7 @@ public static Collection> addBuiltInJoin final Collection> validators = new ArrayList<>(); validators.add((node, state) -> { ensureNodesCompatibility(node.getVersion(), state.getNodes()); - ensureIndexCompatibility(node.getVersion(), state.getMetadata()); + ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), state.getMetadata()); }); validators.addAll(onJoinValidators); return Collections.unmodifiableCollection(validators); diff --git 
a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java index 18144b8e0e5a..995066106e8c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import java.util.HashMap; @@ -49,11 +50,16 @@ public NodeLeftExecutor(AllocationService allocationService) { this.allocationService = allocationService; } + @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") + private static Map getTransportVersions(ClusterState clusterState) { + return clusterState.transportVersions(); + } + @Override public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { ClusterState initialState = batchExecutionContext.initialState(); DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(initialState.nodes()); - Map transportVersions = new HashMap<>(initialState.transportVersions()); + Map transportVersions = new HashMap<>(getTransportVersions(initialState)); boolean removed = false; for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataLifecycle.java index 33f9d19b5131..78399e1a19dc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataLifecycle.java @@ -10,6 
+10,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Strings; @@ -20,18 +21,27 @@ import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.List; import java.util.Objects; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + /** - * Holds the Data Lifecycle Management metadata that are configuring how a data stream is managed. + * Holds the Data Lifecycle Management metadata that are configuring how a data stream is managed. 
Currently, it supports the following + * configurations: + * - data retention + * - downsampling */ public class DataLifecycle implements SimpleDiffable, ToXContentObject { @@ -45,15 +55,16 @@ public class DataLifecycle implements SimpleDiffable, ToXContentO private static final FeatureFlag DLM_FEATURE_FLAG = new FeatureFlag("dlm"); - public static final String DLM_ORIGIN = "data_lifecycle"; + public static final String DATA_STREAM_LIFECYCLE_ORIGIN = "data_stream_lifecycle"; public static final ParseField DATA_RETENTION_FIELD = new ParseField("data_retention"); + public static final ParseField DOWNSAMPLING_FIELD = new ParseField("downsampling"); private static final ParseField ROLLOVER_FIELD = new ParseField("rollover"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "lifecycle", false, - (args, unused) -> new DataLifecycle((Retention) args[0]) + (args, unused) -> new DataLifecycle((Retention) args[0], (Downsampling) args[1]) ); static { @@ -65,6 +76,13 @@ public class DataLifecycle implements SimpleDiffable, ToXContentO return new Retention(TimeValue.parseTimeValue(value, DATA_RETENTION_FIELD.getPreferredName())); } }, DATA_RETENTION_FIELD, ObjectParser.ValueType.STRING_OR_NULL); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { + if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return Downsampling.NULL; + } else { + return new Downsampling(AbstractObjectParser.parseArray(p, c, Downsampling.Round::fromXContent)); + } + }, DOWNSAMPLING_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_OR_NULL); } public static boolean isEnabled() { @@ -73,17 +91,20 @@ public static boolean isEnabled() { @Nullable private final Retention dataRetention; + @Nullable + private final Downsampling downsampling; public DataLifecycle() { this((TimeValue) null); } public DataLifecycle(@Nullable TimeValue dataRetention) { - this(dataRetention == null ? 
null : new Retention(dataRetention)); + this(dataRetention == null ? null : new Retention(dataRetention), null); } - public DataLifecycle(@Nullable Retention dataRetention) { + public DataLifecycle(@Nullable Retention dataRetention, @Nullable Downsampling downsampling) { this.dataRetention = dataRetention; + this.downsampling = downsampling; } public DataLifecycle(long timeInMills) { @@ -113,18 +134,36 @@ Retention getDataRetention() { return dataRetention; } + /** + * The configured downsampling rounds with the `after` and the `fixed_interval` per round. If downsampling is + * not configured then it returns null. + */ + @Nullable + public List getDownsamplingRounds() { + return downsampling == null ? null : downsampling.rounds(); + } + + /** + * Returns the configured wrapper object as it was defined in the template. This should be used only during + * template composition. + */ + @Nullable + Downsampling getDownsampling() { + return downsampling; + } + @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; final DataLifecycle that = (DataLifecycle) o; - return Objects.equals(dataRetention, that.dataRetention); + return Objects.equals(dataRetention, that.dataRetention) && Objects.equals(downsampling, that.downsampling); } @Override public int hashCode() { - return Objects.hash(dataRetention); + return Objects.hash(dataRetention, downsampling); } @Override @@ -132,6 +171,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_007)) { out.writeOptionalWriteable(dataRetention); } + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_026)) { + out.writeOptionalWriteable(downsampling); + } } public DataLifecycle(StreamInput in) throws IOException { @@ -140,6 +182,11 @@ public DataLifecycle(StreamInput in) throws IOException { } else { dataRetention = null; } + if 
(in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_026)) { + downsampling = in.readOptionalWriteable(Downsampling::read); + } else { + downsampling = null; + } } public static Diff readDiffFrom(StreamInput in) throws IOException { @@ -169,6 +216,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params, @Nulla builder.field(DATA_RETENTION_FIELD.getPreferredName(), dataRetention.value().getStringRep()); } } + if (downsampling != null) { + builder.field(DOWNSAMPLING_FIELD.getPreferredName()); + downsampling.toXContent(builder, params); + } if (rolloverConfiguration != null) { builder.field(ROLLOVER_FIELD.getPreferredName()); rolloverConfiguration.evaluateAndConvertToXContent(builder, params, getEffectiveDataRetention()); @@ -187,18 +238,25 @@ public static DataLifecycle fromXContent(XContentParser parser) throws IOExcepti static class Builder { @Nullable private Retention dataRetention = null; + @Nullable + private Downsampling downsampling = null; Builder dataRetention(@Nullable Retention value) { dataRetention = value; return this; } + Builder downsampling(@Nullable Downsampling value) { + downsampling = value; + return this; + } + DataLifecycle build() { - return new DataLifecycle(dataRetention); + return new DataLifecycle(dataRetention, downsampling); } static Builder newBuilder(DataLifecycle dataLifecycle) { - return new Builder().dataRetention(dataLifecycle.getDataRetention()); + return new Builder().dataRetention(dataLifecycle.getDataRetention()).downsampling(dataLifecycle.getDownsampling()); } } @@ -220,4 +278,122 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalTimeValue(value); } } + + /** + * Downsampling holds the configuration about when Elasticsearch should downsample a backing index. + * @param rounds is a list of downsampling configurations which instruct when a backing index should be downsampled (`after`) and at + * which interval (`fixed_interval`). 
Null represents an explicit no downsampling during template composition. + */ + public record Downsampling(@Nullable List rounds) implements Writeable, ToXContentFragment { + + /** + * A round represents the configuration for when and how Elasticsearch will downsample a backing index. + * @param after is a TimeValue configuring how old (based on generation age) a backing index should be before downsampling + * @param config contains the interval at which the backing index is going to be downsampled. + */ + public record Round(TimeValue after, DownsampleConfig config) implements Writeable, ToXContentObject { + + public static final ParseField AFTER_FIELD = new ParseField("after"); + public static final ParseField FIXED_INTERVAL_FIELD = new ParseField("fixed_interval"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "downsampling_round", + false, + (args, unused) -> new Round((TimeValue) args[0], new DownsampleConfig((DateHistogramInterval) args[1])) + ); + + static { + PARSER.declareString( + ConstructingObjectParser.optionalConstructorArg(), + value -> TimeValue.parseTimeValue(value, AFTER_FIELD.getPreferredName()), + AFTER_FIELD + ); + PARSER.declareField( + constructorArg(), + p -> new DateHistogramInterval(p.text()), + new ParseField(FIXED_INTERVAL_FIELD.getPreferredName()), + ObjectParser.ValueType.STRING + ); + } + + public static Round read(StreamInput in) throws IOException { + return new Round(in.readTimeValue(), new DownsampleConfig(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeTimeValue(after); + out.writeWriteable(config); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(AFTER_FIELD.getPreferredName(), after.getStringRep()); + config.toXContentFragment(builder); + builder.endObject(); + return builder; + } + + public static Round fromXContent(XContentParser 
parser, Void context) throws IOException { + return PARSER.parse(parser, context); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + } + + // For testing + public static final Downsampling NULL = new Downsampling(null); + + public Downsampling { + if (rounds != null) { + if (rounds.isEmpty()) { + throw new IllegalArgumentException("Downsampling configuration should have at least one round configured."); + } + Round previous = null; + for (Round round : rounds) { + if (previous == null) { + previous = round; + } else { + if (round.after.compareTo(previous.after) < 0) { + throw new IllegalArgumentException( + "A downsampling round must have a later 'after' value than the proceeding, " + + round.after.getStringRep() + + " is not after " + + previous.after.getStringRep() + + "." + ); + } + DownsampleConfig.validateSourceAndTargetIntervals(previous.config(), round.config()); + } + } + } + } + + public static Downsampling read(StreamInput in) throws IOException { + return new Downsampling(in.readOptionalList(Round::read)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalCollection(rounds, (o, v) -> v.writeTo(o)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (rounds == null) { + builder.nullValue(); + } else { + builder.startArray(); + for (Round round : rounds) { + round.toXContent(builder, params); + } + builder.endArray(); + } + return builder; + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 8574378e563d..68c227dc5bae 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -1035,23 +1035,23 @@ public long primaryTerm(int shardId) { } /** - * Return the 
{@link Version} on which this index has been created. This + * Return the {@link IndexVersion} on which this index has been created. This * information is typically useful for backward compatibility. * To check index compatibility (e.g. N-1 checks), use {@link #getCompatibilityVersion()} instead. */ - public Version getCreationVersion() { - return indexCreatedVersion.toVersion(); + public IndexVersion getCreationVersion() { + return indexCreatedVersion; } /** - * Return the {@link Version} that this index provides compatibility for. - * This is typically compared to the {@link Version#minimumIndexCompatibilityVersion()} to figure out whether the index can be handled + * Return the {@link IndexVersion} that this index provides compatibility for. + * This is typically compared to the {@link IndexVersion#MINIMUM_COMPATIBLE} to figure out whether the index can be handled * by the cluster. * By default, this is equal to the {@link #getCreationVersion()}, but can also be a newer version if the index has been imported as * a legacy index from an older snapshot, and its metadata has been converted to be handled by newer version nodes. */ - public Version getCompatibilityVersion() { - return indexCompatibilityVersion.toVersion(); + public IndexVersion getCompatibilityVersion() { + return indexCompatibilityVersion; } public long getCreationDate() { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java index a96b223b040b..1488b3d8bb16 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifier.java @@ -82,7 +82,7 @@ public IndexMetadataVerifier( * If the index does not need upgrade it returns the index metadata unchanged, otherwise it returns a modified index metadata. If index * cannot be updated the method throws an exception. 
*/ - public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) { + public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { checkSupportedVersion(indexMetadata, minimumIndexCompatibilityVersion); // First convert any shared_cache searchable snapshot indices to only use _tier_preference: data_frozen @@ -102,7 +102,7 @@ public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, Version mi * Check that the index version is compatible. Elasticsearch does not support indices created before the * previous major version. */ - private static void checkSupportedVersion(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) { + private static void checkSupportedVersion(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { boolean isSupportedVersion = indexMetadata.getCompatibilityVersion().onOrAfter(minimumIndexCompatibilityVersion); if (isSupportedVersion == false) { throw new IllegalStateException( @@ -113,7 +113,7 @@ private static void checkSupportedVersion(IndexMetadata indexMetadata, Version m + "] but the minimum compatible version is [" + minimumIndexCompatibilityVersion + "]. It should be re-indexed in Elasticsearch " - + minimumIndexCompatibilityVersion.major + + (Version.CURRENT.major - 1) + ".x before upgrading to " + Version.CURRENT + "." 
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 7071b415d2e7..e5a41988f30b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -29,6 +29,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.IndexClosedException; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.indices.SystemIndices; @@ -60,6 +61,7 @@ public class IndexNameExpressionResolver { public static final String EXCLUDED_DATA_STREAMS_KEY = "es.excluded_ds"; public static final Version SYSTEM_INDEX_ENFORCEMENT_VERSION = Version.V_8_0_0; + public static final IndexVersion SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION = IndexVersion.V_8_0_0; private final ThreadContext threadContext; private final SystemIndices systemIndices; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 8b8a2f707816..79409ca03247 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -638,7 +638,7 @@ public Metadata withAddedIndex(IndexMetadata index) { updatedVisibleClosedIndices, null, updatedMappingsByHash, - IndexVersion.min(IndexVersion.fromId(index.getCompatibilityVersion().id), oldestIndexVersion), + IndexVersion.min(index.getCompatibilityVersion(), oldestIndexVersion), reservedStateMetadata ); } @@ -2272,7 +2272,7 @@ public Metadata build(boolean skipNameCollisionChecks) { final List visibleClosedIndices = new ArrayList<>(); final 
ImmutableOpenMap indicesMap = indices.build(); - int oldestIndexVersionId = IndexVersion.CURRENT.id(); + int oldestIndexVersionId = IndexVersion.current().id(); int totalNumberOfShards = 0; int totalOpenIndexShards = 0; @@ -2300,7 +2300,7 @@ public Metadata build(boolean skipNameCollisionChecks) { visibleClosedIndices.add(name); } } - oldestIndexVersionId = Math.min(oldestIndexVersionId, indexMetadata.getCompatibilityVersion().id); + oldestIndexVersionId = Math.min(oldestIndexVersionId, indexMetadata.getCompatibilityVersion().id()); if (sha256HashesInUse != null) { final var mapping = indexMetadata.mapping(); if (mapping != null) { @@ -2567,7 +2567,7 @@ private static void validateAlias(String aliasName, List indexMet if (isNonEmpty(groupedBySystemStatus.get(false)) && isNonEmpty(groupedBySystemStatus.get(true))) { final List newVersionSystemIndices = groupedBySystemStatus.get(true) .stream() - .filter(i -> i.getCreationVersion().onOrAfter(IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION)) + .filter(i -> i.getCreationVersion().onOrAfter(IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION)) .map(i -> i.getIndex().getName()) .sorted() // reliable error message for testing .toList(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index df4414785c52..2e049250830f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -1042,7 +1042,7 @@ static Settings aggregateIndexSettings( if (indexSettingsBuilder.get(IndexMetadata.SETTING_VERSION_CREATED) == null) { DiscoveryNodes nodes = currentState.nodes(); - IndexVersion createdVersion = IndexVersion.min(IndexVersion.CURRENT, nodes.getMaxDataNodeCompatibleIndexVersion()); + IndexVersion createdVersion = 
IndexVersion.min(IndexVersion.current(), nodes.getMaxDataNodeCompatibleIndexVersion()); indexSettingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, createdVersion.toVersion()); } if (INDEX_NUMBER_OF_SHARDS_SETTING.exists(indexSettingsBuilder) == false) { @@ -1574,13 +1574,13 @@ static void prepareResizeIndexSettings( builder.put(sourceMetadata.getSettings().filter(sourceSettingsPredicate)); } - indexSettingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, sourceMetadata.getCreationVersion()) + indexSettingsBuilder.put(IndexMetadata.SETTING_VERSION_CREATED, sourceMetadata.getCreationVersion().id()) .put(builder.build()) .put(IndexMetadata.SETTING_ROUTING_PARTITION_SIZE, sourceMetadata.getRoutingPartitionSize()) .put(IndexMetadata.INDEX_RESIZE_SOURCE_NAME.getKey(), resizeSourceIndex.getName()) .put(IndexMetadata.INDEX_RESIZE_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID()); if (sourceMetadata.getSettings().hasValue(IndexMetadata.SETTING_VERSION_COMPATIBILITY)) { - indexSettingsBuilder.put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, sourceMetadata.getCompatibilityVersion()); + indexSettingsBuilder.put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, sourceMetadata.getCompatibilityVersion().id()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index a0e059d4b8c4..767516021e6c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -1136,10 +1136,7 @@ private ClusterState openIndices(final Index[] indices, final ClusterState curre // The index might be closed because we couldn't import it due to an old incompatible // version, so we need to verify its compatibility. 
- newIndexMetadata = indexMetadataVerifier.verifyIndexMetadata( - newIndexMetadata, - minIndexCompatibilityVersion.toVersion() - ); + newIndexMetadata = indexMetadataVerifier.verifyIndexMetadata(newIndexMetadata, minIndexCompatibilityVersion); try { indicesService.verifyIndexMetadata(newIndexMetadata, newIndexMetadata); } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 0e0e7101a21a..c3884618ffe1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -1516,6 +1516,9 @@ public static DataLifecycle composeDataLifecycles(List lifecycles if (current.getDataRetention() != null) { builder.dataRetention(current.getDataRetention()); } + if (current.getDownsampling() != null) { + builder.downsampling(current.getDownsampling()); + } } } return builder == null ? null : builder.build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index fdd01dcbd41f..3f3be87f7cf6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -153,43 +153,6 @@ public static boolean isDedicatedFrozenNode(final Settings settings) { private final Set roleNames; private final String externalId; - /** - * Creates a new {@link DiscoveryNode} - *

- * Note: if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current - * version. it corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used - * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered - * and updated. - *

- * - * @param nodeName the nodes name - * @param nodeId the nodes unique persistent id. An ephemeral id will be auto generated. - * @param address the nodes transport address - * @param attributes node attributes - * @param roles node roles - * @param version the version of the node - */ - public DiscoveryNode( - @Nullable String nodeName, - String nodeId, - TransportAddress address, - Map attributes, - Set roles, - @Nullable Version version - ) { - this( - nodeName, - nodeId, - UUIDs.randomBase64UUID(), - address.address().getHostString(), - address.getAddress(), - address, - attributes, - roles, - VersionInformation.inferVersions(version) - ); - } - /** * Creates a new {@link DiscoveryNode} *

@@ -348,7 +311,7 @@ private static VersionInformation inferVersionInformation(Version version) { IndexVersion.fromId(version.id) ); } else { - return new VersionInformation(version, IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.CURRENT); + return new VersionInformation(version, IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()); } } @@ -642,8 +605,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); builder.field("version", versionInfo.nodeVersion()); - builder.field("minIndexVersion", versionInfo.minIndexVersion()); - builder.field("maxIndexVersion", versionInfo.maxIndexVersion()); + builder.field("min_index_version", versionInfo.minIndexVersion()); + builder.field("max_index_version", versionInfo.maxIndexVersion()); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 81f7286161dc..dba4f8c3a7dc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -874,7 +874,7 @@ public DiscoveryNodes build() { Objects.requireNonNullElse(minNonClientNodeVersion, Version.CURRENT), Objects.requireNonNullElse(maxNodeVersion, Version.CURRENT), Objects.requireNonNullElse(minNodeVersion, Version.CURRENT.minimumCompatibilityVersion()), - Objects.requireNonNullElse(maxDataNodeCompatibleIndexVersion, IndexVersion.CURRENT), + Objects.requireNonNullElse(maxDataNodeCompatibleIndexVersion, IndexVersion.current()), Objects.requireNonNullElse(minSupportedIndexVersion, IndexVersion.MINIMUM_COMPATIBLE), dataNodes.values() .stream() diff --git a/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java b/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java index 87c663daf2e4..84b5946c8e19 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/VersionInformation.java @@ -24,7 +24,7 @@ public record VersionInformation(Version nodeVersion, IndexVersion minIndexVersi public static final VersionInformation CURRENT = new VersionInformation( Version.CURRENT, IndexVersion.MINIMUM_COMPATIBLE, - IndexVersion.CURRENT + IndexVersion.current() ); public static VersionInformation inferVersions(Version nodeVersion) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java b/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java index e462e1da5450..e34703c25add 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/BatchedRerouteService.java @@ -147,7 +147,7 @@ public void onFailure(Exception e) { e ); } - ActionListener.onFailure(currentListeners, new ElasticsearchException("delayed reroute [" + reason + "] failed", e)); + ActionListener.onFailure(currentListeners, e); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index cc0bb6fd4323..bd15d924c9c1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -613,7 +613,6 @@ public IndexShardRoutingTable build() { assert distinctNodes(shards) : "more than one shard with same id assigned to same node (shards: " + shards + ")"; assert noDuplicatePrimary(shards) : "expected but did not find unique primary in shard routing table: " + shards; assert noAssignedReplicaWithoutActivePrimary(shards) : "unexpected assigned replica with no active primary: " + shards; - assert noRelocatingUnsearchableShards(shards) : 
"unexpected RELOCATING unsearchable shard: " + shards; return new IndexShardRoutingTable(shardId, shards); } @@ -664,14 +663,6 @@ static boolean noAssignedReplicaWithoutActivePrimary(List shards) return seenAssignedReplica == false; } - static boolean noRelocatingUnsearchableShards(List shards) { - // this is unsupported until ES-4677 is implemented - for (var shard : shards) { - assert shard.role().isSearchable() || shard.relocating() == false : "unexpected RELOCATING unsearchable shard: " + shard; - } - return true; - } - public static IndexShardRoutingTable.Builder readFrom(StreamInput in) throws IOException { Index index = new Index(in); return readFromThin(in, index); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java index d3a9cf548b06..bb4eef2bd422 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RecoverySource.java @@ -8,11 +8,11 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.xcontent.ToXContent; @@ -206,9 +206,9 @@ public static class SnapshotRecoverySource extends RecoverySource { private final String restoreUUID; private final Snapshot snapshot; private final IndexId index; - private final Version version; + private final IndexVersion version; - public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version version, IndexId indexId) { + public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, IndexVersion 
version, IndexId indexId) { this.restoreUUID = restoreUUID; this.snapshot = Objects.requireNonNull(snapshot); this.version = Objects.requireNonNull(version); @@ -218,7 +218,7 @@ public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version ver SnapshotRecoverySource(StreamInput in) throws IOException { restoreUUID = in.readString(); snapshot = new Snapshot(in); - version = Version.readVersion(in); + version = IndexVersion.readVersion(in); index = new IndexId(in); } @@ -240,7 +240,7 @@ public IndexId index() { return index; } - public Version version() { + public IndexVersion version() { return version; } @@ -248,7 +248,7 @@ public Version version() { protected void writeAdditionalFields(StreamOutput out) throws IOException { out.writeString(restoreUUID); snapshot.writeTo(out); - Version.writeVersion(version, out); + IndexVersion.writeVersion(version, out); index.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 0856472bccf7..c4f827f80750 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -459,30 +459,6 @@ public Tuple relocateShard( return Tuple.tuple(source, target); } - public void relocateOrReinitializeShard( - ShardRouting startedShard, - String nodeId, - long expectedShardSize, - RoutingChangesObserver changes - ) { - if (startedShard.isSearchable() == false) { - remove(startedShard); - var unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, "relocating unsearchable shard"); - var assignedShards = assignedShards(startedShard.shardId()); - var promotableShard = assignedShards.stream().filter(ShardRouting::isPromotableToPrimary).findAny(); - assert promotableShard.isEmpty() : "multiple promotable shards are not supported yet"; - // replicas needs to be removed as well as they could 
not be active when primary is unassigned - // see org.elasticsearch.cluster.routing.IndexShardRoutingTable.Builder.noAssignedReplicaWithoutActivePrimary - for (ShardRouting replica : List.copyOf(assignedShards)) { - remove(replica); - unassignedShards.ignoreShard(replica.moveToUnassigned(unassignedInfo), AllocationStatus.NO_ATTEMPT, changes); - } - initializeShard(startedShard.moveToUnassigned(unassignedInfo), nodeId, null, expectedShardSize, changes); - } else { - relocateShard(startedShard, nodeId, expectedShardSize, changes); - } - } - /** * Applies the relevant logic to start an initializing shard. * @@ -533,7 +509,7 @@ public ShardRouting startShard( routing, new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, "primary changed") ); - relocateOrReinitializeShard( + relocateShard( startedReplica, sourceShard.relocatingNodeId(), sourceShard.getExpectedShardSize(), diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index b286f74bde30..5e66c241afbe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.ArrayUtil; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -27,6 +26,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.gateway.PriorityComparator; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.threadpool.ThreadPool; @@ -145,7 +145,7 @@ private boolean allocateUnassignedInvariant() { final var shardCounts = allocation.metadata().stream().filter(indexMetadata -> // skip any pre-7.2 closed indices which have no routing table entries at all - indexMetadata.getCreationVersion().onOrAfter(Version.V_7_2_0) + indexMetadata.getCreationVersion().onOrAfter(IndexVersion.V_7_2_0) || indexMetadata.getState() == IndexMetadata.State.OPEN || MetadataIndexStateService.isIndexVerifiedBeforeClosed(indexMetadata)) .flatMap( @@ -378,7 +378,7 @@ private void moveShards() { final var moveTarget = findRelocationTarget(shardRouting, assignment.nodeIds()); if (moveTarget != null) { logger.debug("Moving shard {} from {} to {}", shardRouting.shardId(), shardRouting.currentNodeId(), moveTarget.getId()); - routingNodes.relocateOrReinitializeShard( + routingNodes.relocateShard( shardRouting, moveTarget.getId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), @@ -443,7 +443,7 @@ private void balance() { rebalanceTarget.getId() ); - routingNodes.relocateOrReinitializeShard( + routingNodes.relocateShard( shardRouting, rebalanceTarget.getId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java index 23a579988d58..8b9d5a402634 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java @@ -168,7 +168,7 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) // its being throttled, maybe have a flag to take it into account and fail? 
for now, just do it since the "user" wants it... } allocation.routingNodes() - .relocateOrReinitializeShard( + .relocateShard( shardRouting, toRoutingNode.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java index 78c92f47f2b1..bc97533c735e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java @@ -118,14 +118,14 @@ private static Decision isVersionCompatible( final RoutingNode target, final RoutingAllocation allocation ) { - if (target.node().getVersion().onOrAfter(recoverySource.version())) { + if (target.node().getVersion().onOrAfter(recoverySource.version().toVersion())) { /* we can allocate if we can restore from a snapshot that is older or on the same version */ return allocation.decision( Decision.YES, NAME, "node version [%s] is the same or newer than snapshot version [%s]", target.node().getVersion(), - recoverySource.version() + recoverySource.version().toVersion() ); } else { return allocation.decision( @@ -133,7 +133,7 @@ private static Decision isVersionCompatible( NAME, "node version [%s] is older than the snapshot version [%s]", target.node().getVersion(), - recoverySource.version() + recoverySource.version().toVersion() ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java index c650ecbc81ae..d22c22a22be1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.common.Priority; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -112,6 +113,11 @@ public ClusterState execute(BatchExecutionContext cont } } + @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") + private static Map getTransportVersions(ClusterState clusterState) { + return clusterState.transportVersions(); + } + @Override public void clusterChanged(ClusterChangedEvent event) { if (event.localNodeMaster() == false) return; // only if we're master @@ -123,9 +129,7 @@ public void clusterChanged(ClusterChangedEvent event) { && event.state().getMinTransportVersion().equals(INFERRED_TRANSPORT_VERSION)) { // find all the relevant nodes - Set nodes = event.state() - .transportVersions() - .entrySet() + Set nodes = getTransportVersions(event.state()).entrySet() .stream() .filter(e -> e.getValue().equals(INFERRED_TRANSPORT_VERSION)) .map(Map.Entry::getKey) diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index f35228354404..5abac6d1af09 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -29,10 +29,8 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.BiConsumer; import java.util.function.Consumer; -import java.util.function.Function; import java.util.function.Predicate; import java.util.regex.Pattern; -import java.util.stream.Collectors; /** * A basic setting service that 
can be used for per-index and per-cluster settings. @@ -51,24 +49,14 @@ public abstract class AbstractScopedSettings { private final List> settingUpdaters = new CopyOnWriteArrayList<>(); private final Map> complexMatchers; private final Map> keySettings; - private final Map, SettingUpgrader> settingUpgraders; private final Setting.Property scope; private Settings lastSettingsApplied; - protected AbstractScopedSettings( - final Settings settings, - final Set> settingsSet, - final Set> settingUpgraders, - final Setting.Property scope - ) { + protected AbstractScopedSettings(final Settings settings, final Set> settingsSet, final Setting.Property scope) { this.logger = LogManager.getLogger(this.getClass()); this.settings = settings; this.lastSettingsApplied = Settings.EMPTY; - this.settingUpgraders = Collections.unmodifiableMap( - settingUpgraders.stream().collect(Collectors.toMap(SettingUpgrader::getSetting, Function.identity())) - ); - this.scope = scope; Map> complexMatchers = new HashMap<>(); Map> keySettings = new HashMap<>(); @@ -115,7 +103,6 @@ protected AbstractScopedSettings(Settings nodeSettings, Settings scopeSettings, this.scope = other.scope; complexMatchers = other.complexMatchers; keySettings = other.keySettings; - settingUpgraders = Map.copyOf(other.settingUpgraders); settingUpdaters.addAll(other.settingUpdaters); } @@ -882,42 +869,6 @@ private static Setting findOverlappingSetting(Setting newSetting, Map setting = getRaw(key); - final SettingUpgrader upgrader = settingUpgraders.get(setting); - if (upgrader == null) { - // the setting does not have an upgrader, copy the setting - builder.copy(key, settings); - } else { - // the setting has an upgrader, so mark that we have changed a setting and apply the upgrade logic - changed = true; - // noinspection ConstantConditions - if (setting.getConcreteSetting(key).isListSetting()) { - final List value = settings.getAsList(key); - final String upgradedKey = upgrader.getKey(key); - final List upgradedValue = 
upgrader.getListValue(value); - builder.putList(upgradedKey, upgradedValue); - } else { - final String value = settings.get(key); - final String upgradedKey = upgrader.getKey(key); - final String upgradedValue = upgrader.getValue(value); - builder.put(upgradedKey, upgradedValue); - } - } - } - // we only return a new instance if there was an upgrade - return changed ? builder.build() : settings; - } - /** * Archives invalid or unknown settings. Any setting that is not recognized or fails validation * will be archived. This means the setting is prefixed with {@value ARCHIVED_SETTINGS_PREFIX} diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 219e2cc00a88..7e76baf479f6 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -125,8 +125,6 @@ import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.watcher.ResourceWatcherService; -import java.util.Collections; -import java.util.List; import java.util.Objects; import java.util.Set; import java.util.function.Predicate; @@ -147,11 +145,7 @@ public static ClusterSettings createBuiltInClusterSettings(Settings nodeSettings } public ClusterSettings(final Settings nodeSettings, final Set> settingsSet) { - this(nodeSettings, settingsSet, Collections.emptySet()); - } - - public ClusterSettings(final Settings nodeSettings, final Set> settingsSet, final Set> settingUpgraders) { - super(nodeSettings, settingsSet, settingUpgraders, Property.NodeScope); + super(nodeSettings, settingsSet, Property.NodeScope); addSettingsUpdater(new LoggingSettingUpdater(nodeSettings)); } @@ -580,7 +574,4 @@ public void apply(Settings value, Settings current, Settings previous) { IngestSettings.GROK_WATCHDOG_MAX_EXECUTION_TIME, TDigestExecutionHint.SETTING 
).filter(Objects::nonNull).collect(Collectors.toSet()); - - static List> BUILT_IN_SETTING_UPGRADERS = Collections.emptyList(); - } diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 3d2f318e1168..93f3c715c146 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -37,7 +37,6 @@ import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.indices.ShardLimitValidator; -import java.util.Collections; import java.util.Map; import java.util.Set; @@ -204,7 +203,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); public IndexScopedSettings(Settings settings, Set> settingsSet) { - super(settings, settingsSet, Collections.emptySet(), Property.IndexScope); + super(settings, settingsSet, Property.IndexScope); } private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetadata metadata) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java b/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java deleted file mode 100644 index d49e45ee10f9..000000000000 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingUpgrader.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.common.settings; - -import java.util.List; - -/** - * Represents the logic to upgrade a setting. - * - * @param the type of the underlying setting - */ -public interface SettingUpgrader { - - /** - * The setting upgraded by this upgrader. - * - * @return the setting - */ - Setting getSetting(); - - /** - * The logic to upgrade the setting key, for example by mapping the old setting key to the new setting key. - * - * @param key the setting key to upgrade - * @return the upgraded setting key - */ - String getKey(String key); - - /** - * The logic to upgrade the setting value. - * - * @param value the setting value to upgrade - * @return the upgraded setting value - */ - default String getValue(final String value) { - return value; - } - - default List getListValue(final List value) { - return value; - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index fc6d72694f2e..c0ad251e7c2c 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -43,20 +43,14 @@ public class SettingsModule implements Module { private final SettingsFilter settingsFilter; public SettingsModule(Settings settings, Setting... 
additionalSettings) { - this(settings, Arrays.asList(additionalSettings), Collections.emptyList(), Collections.emptySet()); + this(settings, Arrays.asList(additionalSettings), Collections.emptyList()); } - public SettingsModule( - Settings settings, - List> additionalSettings, - List settingsFilter, - Set> settingUpgraders - ) { + public SettingsModule(Settings settings, List> additionalSettings, List settingsFilter) { this( settings, additionalSettings, settingsFilter, - settingUpgraders, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS ); @@ -66,7 +60,6 @@ public SettingsModule( final Settings settings, final List> additionalSettings, final List settingsFilter, - final Set> settingUpgraders, final Set> registeredClusterSettings, final Set> registeredIndexSettings ) { @@ -84,19 +77,8 @@ public SettingsModule( for (String filter : settingsFilter) { registerSettingsFilter(filter); } - final Set> clusterSettingUpgraders = new HashSet<>(); - for (final SettingUpgrader settingUpgrader : ClusterSettings.BUILT_IN_SETTING_UPGRADERS) { - assert settingUpgrader.getSetting().hasNodeScope() : settingUpgrader.getSetting().getKey(); - final boolean added = clusterSettingUpgraders.add(settingUpgrader); - assert added : settingUpgrader.getSetting().getKey(); - } - for (final SettingUpgrader settingUpgrader : settingUpgraders) { - assert settingUpgrader.getSetting().hasNodeScope() : settingUpgrader.getSetting().getKey(); - final boolean added = clusterSettingUpgraders.add(settingUpgrader); - assert added : settingUpgrader.getSetting().getKey(); - } this.indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values())); - this.clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()), clusterSettingUpgraders); + this.clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values())); Settings indexSettings = settings.filter((s) -> s.startsWith("index.") && 
clusterSettings.get(s) == null); if (indexSettings.isEmpty() == false) { try { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java b/server/src/main/java/org/elasticsearch/common/settings/SettingsUpdater.java similarity index 95% rename from server/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java rename to server/src/main/java/org/elasticsearch/common/settings/SettingsUpdater.java index 89176229ebf8..2fa87e55fe3b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SettingsUpdater.java @@ -6,15 +6,13 @@ * Side Public License, v 1. */ -package org.elasticsearch.action.admin.cluster.settings; +package org.elasticsearch.common.settings; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; import java.util.Map; @@ -26,24 +24,24 @@ * Updates transient and persistent cluster state settings if there are any changes * due to the update. 
*/ -final class SettingsUpdater { +public final class SettingsUpdater { final Settings.Builder transientUpdates = Settings.builder(); final Settings.Builder persistentUpdates = Settings.builder(); private final ClusterSettings clusterSettings; - SettingsUpdater(ClusterSettings clusterSettings) { + public SettingsUpdater(ClusterSettings clusterSettings) { this.clusterSettings = clusterSettings; } - synchronized Settings getTransientUpdates() { + public synchronized Settings getTransientUpdates() { return transientUpdates.build(); } - synchronized Settings getPersistentUpdate() { + public synchronized Settings getPersistentUpdate() { return persistentUpdates.build(); } - synchronized ClusterState updateSettings( + public synchronized ClusterState updateSettings( final ClusterState currentState, final Settings transientToApply, final Settings persistentToApply, diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java index 00c67f39b756..b168513baf42 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java @@ -100,7 +100,7 @@ default String formatMillis(long millis) { DateMathParser toDateMathParser(); static DateFormatter forPattern(String input) { - return forPattern(input, IndexVersion.CURRENT); + return forPattern(input, IndexVersion.current()); } static DateFormatter forPattern(String input, IndexVersion supportedVersion) { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java deleted file mode 100644 index e3434df52b4b..000000000000 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnable.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.common.util.concurrent; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.component.Lifecycle; - -import java.util.Objects; - -/** - * {@code AbstractLifecycleRunnable} is a service-lifecycle aware {@link AbstractRunnable}. - *

- * This simplifies the running and rescheduling of {@link Lifecycle}-based {@code Runnable}s. - */ -public abstract class AbstractLifecycleRunnable extends AbstractRunnable { - /** - * The monitored lifecycle for the associated service. - */ - private final Lifecycle lifecycle; - /** - * The service's logger (note: this is passed in!). - */ - private final Logger logger; - - /** - * {@link AbstractLifecycleRunnable} must be aware of the actual {@code lifecycle} to react properly. - * - * @param lifecycle The lifecycle to react too - * @param logger The logger to use when logging - * @throws NullPointerException if any parameter is {@code null} - */ - public AbstractLifecycleRunnable(Lifecycle lifecycle, Logger logger) { - this.lifecycle = Objects.requireNonNull(lifecycle, "lifecycle must not be null"); - this.logger = Objects.requireNonNull(logger, "logger must not be null"); - } - - /** - * {@inheritDoc} - *

- * This invokes {@link #doRunInLifecycle()} only if the {@link #lifecycle} is not stopped or closed. Otherwise it exits - * immediately. - */ - @Override - protected final void doRun() throws Exception { - // prevent execution if the service is stopped - if (lifecycle.stoppedOrClosed()) { - logger.trace("lifecycle is stopping. exiting"); - return; - } - - doRunInLifecycle(); - } - - /** - * Perform runnable logic, but only if the {@link #lifecycle} is not stopped or closed. - * - * @throws InterruptedException if the run method throws an {@link InterruptedException} - */ - protected abstract void doRunInLifecycle() throws Exception; - - /** - * {@inheritDoc} - *

- * This overrides the default behavior of {@code onAfter} to add the caveat that it only runs if the {@link #lifecycle} is not - * stopped or closed. - *

- * Note: this does not guarantee that it won't be stopped concurrently as it invokes {@link #onAfterInLifecycle()}, - * but it's a solid attempt at preventing it. For those that use this for rescheduling purposes, the next invocation would be - * effectively cancelled immediately if that's the case. - * - * @see #onAfterInLifecycle() - */ - @Override - public final void onAfter() { - if (lifecycle.stoppedOrClosed() == false) { - onAfterInLifecycle(); - } - } - - /** - * This method is invoked in the finally block of the run method, but it is only executed if the {@link #lifecycle} is not - * stopped or closed. - *

- * This method is most useful for rescheduling the next iteration of the current runnable. - */ - protected void onAfterInLifecycle() { - // nothing by default - } -} diff --git a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java index a2cf2bf8c6fd..89fd114e7852 100644 --- a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java @@ -90,7 +90,7 @@ public void connectToRemoteMasterNode(TransportAddress transportAddress, ActionL new VersionInformation( Version.CURRENT.minimumCompatibilityVersion(), IndexVersion.MINIMUM_COMPATIBLE, - IndexVersion.CURRENT + IndexVersion.current() ) ), handshakeConnectionProfile, diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 92c106c5ac05..880b4554e67a 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -624,7 +624,7 @@ private static NodeMetadata loadNodeMetadata(Settings settings, Logger logger, D assert nodeIds.isEmpty() : nodeIds; // If we couldn't find legacy metadata, we set the latest index version to this version. This happens // when we are starting a new node and there are no indices to worry about. 
- metadata = new NodeMetadata(generateNodeId(settings), Version.CURRENT, IndexVersion.CURRENT); + metadata = new NodeMetadata(generateNodeId(settings), Version.CURRENT, IndexVersion.current()); } else { assert nodeIds.equals(Collections.singleton(legacyMetadata.nodeId())) : nodeIds + " doesn't match " + legacyMetadata; metadata = legacyMetadata; diff --git a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java index 9368d029b94d..758690e41699 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java +++ b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java @@ -44,14 +44,14 @@ public static ClusterState upgradeAndArchiveUnknownOrInvalidSettings( metadataBuilder.persistentSettings( clusterSettings.archiveUnknownOrInvalidSettings( - clusterSettings.upgradeSettings(metadataBuilder.persistentSettings()), + metadataBuilder.persistentSettings(), e -> logUnknownSetting("persistent", e), (e, ex) -> logInvalidSetting("persistent", e, ex) ) ); metadataBuilder.transientSettings( clusterSettings.archiveUnknownOrInvalidSettings( - clusterSettings.upgradeSettings(metadataBuilder.transientSettings()), + metadataBuilder.transientSettings(), e -> logUnknownSetting("transient", e), (e, ex) -> logInvalidSetting("transient", e, ex) ) diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 52bedb7f1b62..d625c76bcad0 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -35,6 +35,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Tuple; import org.elasticsearch.env.NodeMetadata; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.ClusterCoordinationPlugin; 
import org.elasticsearch.plugins.MetadataUpgrader; @@ -277,10 +278,7 @@ static Metadata upgradeMetadata(Metadata metadata, IndexMetadataVerifier indexMe boolean changed = false; final Metadata.Builder upgradedMetadata = Metadata.builder(metadata); for (IndexMetadata indexMetadata : metadata) { - IndexMetadata newMetadata = indexMetadataVerifier.verifyIndexMetadata( - indexMetadata, - Version.CURRENT.minimumIndexCompatibilityVersion() - ); + IndexMetadata newMetadata = indexMetadataVerifier.verifyIndexMetadata(indexMetadata, IndexVersion.MINIMUM_COMPATIBLE); changed |= indexMetadata != newMetadata; upgradedMetadata.put(newMetadata, false); } diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index ff4a3f02c3e4..4ea50361b618 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -128,7 +128,7 @@ public ClusterState execute(ClusterState currentState) { boolean importNeeded = false; StringBuilder sb = new StringBuilder(); for (IndexMetadata indexMetadata : request.indices) { - if (indexMetadata.getCompatibilityVersion().indexVersion.before(minIndexCompatibilityVersion)) { + if (indexMetadata.getCompatibilityVersion().before(minIndexCompatibilityVersion)) { logger.warn( "ignoring dangled index [{}] on node [{}] since it's current compatibility version [{}] " + "is not supported by at least one node in the cluster minVersion [{}]", @@ -139,7 +139,7 @@ public ClusterState execute(ClusterState currentState) { ); continue; } - if (indexMetadata.getCompatibilityVersion().indexVersion.after(maxIndexCompatibilityVersion)) { + if (indexMetadata.getCompatibilityVersion().after(maxIndexCompatibilityVersion)) { logger.warn( "ignoring dangled index [{}] on node [{}] since its current compatibility version [{}] " + "is later than the 
maximum supported index version in the cluster [{}]", @@ -175,10 +175,7 @@ public ClusterState execute(ClusterState currentState) { try { // The dangled index might be from an older version, we need to make sure it's compatible // with the current version. - newIndexMetadata = indexMetadataVerifier.verifyIndexMetadata( - indexMetadata, - minIndexCompatibilityVersion.toVersion() - ); + newIndexMetadata = indexMetadataVerifier.verifyIndexMetadata(indexMetadata, minIndexCompatibilityVersion); newIndexMetadata = IndexMetadata.builder(newIndexMetadata) .settings( Settings.builder() diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index 6730379edc0e..528efe8fa8b0 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -54,11 +54,14 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.StampedLock; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_BIND_HOST; @@ -87,7 +90,7 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private volatile BoundTransportAddress boundAddress; private final AtomicLong totalChannelsAccepted = new AtomicLong(); - private final Set httpChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); + private final Map httpChannels = new ConcurrentHashMap<>(); private final PlainActionFuture allClientsClosedListener = PlainActionFuture.newFuture(); private final 
RefCounted refCounted = AbstractRefCounted.of(() -> allClientsClosedListener.onResponse(null)); private final Set httpServerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); @@ -96,7 +99,8 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private final HttpTracer httpLogger; private final Tracer tracer; - private volatile boolean gracefullyCloseConnections; + private volatile boolean shuttingDown; + private final ReadWriteLock shuttingDownRWLock = new StampedLock().asReadWriteLock(); private volatile long slowLogThresholdMs; @@ -226,13 +230,16 @@ private TransportAddress bindAddress(final InetAddress hostAddress) { * Gracefully shut down. If {@link HttpTransportSettings#SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD} is zero, the default, then * forcefully close all open connections immediately. * Serially run through the following steps: - * 1) Stop listening for new HTTP connections, which means no new HttpChannel are added to the {@link #httpChannels} list - * 2) Add the {@code Connection: close} response header to all new requests on existing {@link #httpChannels} and close the HttpChannel - * after the new request completes - * 3) If grace period is set, wait for all {@link #httpChannels} to close via 2 for up to the configured grace period, + *

    + *
  1. Stop listening for new HTTP connections, which means no new HttpChannel are added to the {@link #httpChannels} list. + * {@link #serverAcceptedChannel(HttpChannel)} will close any new channels to ensure this is true. + *
  2. Close the HttpChannel after a new request completes on all existing channels. + *
  3. Close all idle channels. + *
  4. If grace period is set, wait for all httpChannels to close via 2 for up to the configured grace period, * {@link #shutdownGracePeriodMillis}. - * If all connections are closed before the expiration of the grace period, stop waiting early. - * 4) Close all open httpChannels even if requests are in flight. + * If all connections are closed before the expiration of the grace period, stop waiting early. + *
  5. Close all remaining open httpChannels even if requests are in flight. + *
*/ @Override protected void doStop() { @@ -247,20 +254,33 @@ protected void doStop() { } } } - gracefullyCloseConnections(); - refCounted.decRef(); + + var wlock = shuttingDownRWLock.writeLock(); + try { + wlock.lock(); + shuttingDown = true; + refCounted.decRef(); + httpChannels.values().forEach(RequestTrackingHttpChannel::setCloseWhenIdle); + } finally { + wlock.unlock(); + } + boolean closed = false; + if (shutdownGracePeriodMillis > 0) { try { + logger.debug(format("waiting [%d]ms for clients to close connections", shutdownGracePeriodMillis)); FutureUtils.get(allClientsClosedListener, shutdownGracePeriodMillis, TimeUnit.MILLISECONDS); closed = true; } catch (ElasticsearchTimeoutException t) { logger.warn(format("timed out while waiting [%d]ms for clients to close connections", shutdownGracePeriodMillis)); } + } else { + logger.debug("closing all client connections immediately"); } if (closed == false) { try { - CloseableChannel.closeChannels(new ArrayList<>(httpChannels), true); + CloseableChannel.closeChannels(new ArrayList<>(httpChannels.values()), true); } catch (Exception e) { logger.warn("unexpected exception while closing http channels", e); } @@ -275,11 +295,8 @@ protected void doStop() { stopInternal(); } - /** - * Close the client channel after a new request. 
- */ - void gracefullyCloseConnections() { - gracefullyCloseConnections = true; + boolean isAcceptingConnections() { + return shuttingDown == false; } @Override @@ -367,8 +384,19 @@ protected static void onServerException(HttpServerChannel channel, Exception e) } protected void serverAcceptedChannel(HttpChannel httpChannel) { - boolean addedOnThisCall = httpChannels.add(httpChannel); - assert addedOnThisCall : "Channel should only be added to http channel set once"; + var rlock = shuttingDownRWLock.readLock(); + try { + rlock.lock(); + if (shuttingDown) { + logger.warn("server accepted channel after shutting down"); + httpChannel.close(); + return; + } + RequestTrackingHttpChannel trackingChannel = httpChannels.putIfAbsent(httpChannel, new RequestTrackingHttpChannel(httpChannel)); + assert trackingChannel == null : "Channel should only be added to http channel set once"; + } finally { + rlock.unlock(); + } refCounted.incRef(); httpChannel.addCloseListener(ActionListener.running(() -> { httpChannels.remove(httpChannel); @@ -387,9 +415,17 @@ protected void serverAcceptedChannel(HttpChannel httpChannel) { */ public void incomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel) { httpClientStatsTracker.updateClientStats(httpRequest, httpChannel); + final RequestTrackingHttpChannel trackingChannel = httpChannels.get(httpChannel); final long startTime = threadPool.rawRelativeTimeInMillis(); try { - handleIncomingRequest(httpRequest, httpChannel, httpRequest.getInboundException()); + // The channel may not be present if the close listener (set in serverAcceptedChannel) runs before this method because the + // connection closed early + if (trackingChannel == null) { + logger.warn("http channel [{}] missing tracking channel", httpChannel); + return; + } + trackingChannel.incomingRequest(); + handleIncomingRequest(httpRequest, trackingChannel, httpRequest.getInboundException()); } finally { final long took = threadPool.rawRelativeTimeInMillis() - 
startTime; networkService.getHandlingTimeTracker().addHandlingTime(took); @@ -492,8 +528,7 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan threadContext, corsHandler, maybeHttpLogger, - tracer, - gracefullyCloseConnections + tracer ); } catch (final IllegalArgumentException e) { badRequestCause = ExceptionsHelper.useOrSuppress(badRequestCause, e); @@ -507,8 +542,7 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan threadContext, corsHandler, httpLogger, - tracer, - gracefullyCloseConnections + tracer ); } channel = innerChannel; @@ -550,4 +584,76 @@ private static ActionListener earlyResponseListener(HttpRequest request, H public ThreadPool getThreadPool() { return threadPool; } + + /** + * A {@link HttpChannel} that tracks number of requests via a {@link RefCounted}. + */ + private static class RequestTrackingHttpChannel implements HttpChannel { + /** + * Only counts down to zero via {@link #setCloseWhenIdle()}. + */ + final RefCounted refCounted = AbstractRefCounted.of(this::closeInner); + final HttpChannel inner; + + RequestTrackingHttpChannel(HttpChannel inner) { + this.inner = inner; + } + + public void incomingRequest() throws IllegalStateException { + refCounted.incRef(); + } + + /** + * Close the channel when there are no more requests in flight. 
+ */ + public void setCloseWhenIdle() { + refCounted.decRef(); + } + + @Override + public void close() { + closeInner(); + } + + /** + * Synchronized to avoid double close due to a natural close and a close via {@link #setCloseWhenIdle()} + */ + private void closeInner() { + synchronized (inner) { + if (inner.isOpen()) { + inner.close(); + } else { + logger.info("channel [{}] already closed", inner); + } + } + } + + @Override + public void addCloseListener(ActionListener listener) { + inner.addCloseListener(listener); + } + + @Override + public boolean isOpen() { + return inner.isOpen(); + } + + @Override + public void sendResponse(HttpResponse response, ActionListener listener) { + inner.sendResponse( + response, + listener != null ? ActionListener.runAfter(listener, refCounted::decRef) : ActionListener.running(refCounted::decRef) + ); + } + + @Override + public InetSocketAddress getLocalAddress() { + return inner.getLocalAddress(); + } + + @Override + public InetSocketAddress getRemoteAddress() { + return inner.getRemoteAddress(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java index 2b4e1fdc1d58..6fc6e7eb3ffb 100644 --- a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java @@ -56,7 +56,6 @@ public class DefaultRestChannel extends AbstractRestChannel implements RestChann private final HttpChannel httpChannel; private final CorsHandler corsHandler; private final Tracer tracer; - private final boolean closeConnection; @Nullable private final HttpTracer httpLogger; @@ -70,8 +69,7 @@ public class DefaultRestChannel extends AbstractRestChannel implements RestChann ThreadContext threadContext, CorsHandler corsHandler, @Nullable HttpTracer httpLogger, - Tracer tracer, - boolean closeConnection + Tracer tracer ) { super(request, settings.detailedErrorsEnabled()); 
this.httpChannel = httpChannel; @@ -82,7 +80,6 @@ public class DefaultRestChannel extends AbstractRestChannel implements RestChann this.corsHandler = corsHandler; this.httpLogger = httpLogger; this.tracer = tracer; - this.closeConnection = closeConnection; } @Override @@ -98,7 +95,7 @@ public void sendResponse(RestResponse restResponse) { final SpanId spanId = SpanId.forRestRequest(request); final ArrayList toClose = new ArrayList<>(4); - if (HttpUtils.shouldCloseConnection(httpRequest) || closeConnection) { + if (HttpUtils.shouldCloseConnection(httpRequest)) { toClose.add(() -> CloseableChannel.closeChannel(httpChannel)); } toClose.add(() -> tracer.stopTrace(request)); @@ -162,9 +159,6 @@ public void sendResponse(RestResponse restResponse) { // Add all custom headers addCustomHeaders(httpResponse, restResponse.getHeaders()); addCustomHeaders(httpResponse, restResponse.filterHeaders(threadContext.getResponseHeaders())); - if (closeConnection) { - setHeaderField(httpResponse, CONNECTION, CLOSE); - } // If our response doesn't specify a content-type header, set one setHeaderField(httpResponse, CONTENT_TYPE, restResponse.contentType(), false); diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 72feba325acb..3820b9994518 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -268,7 +268,7 @@ public final class IndexSettings { TimeValue.MINUS_ONE, Property.NodeScope ); // TODO: remove setting - public static TimeValue STATELESS_DEFAULT_REFRESH_INTERVAL = TimeValue.timeValueSeconds(5); // TODO: settle on right value + public static TimeValue STATELESS_DEFAULT_REFRESH_INTERVAL = TimeValue.timeValueSeconds(10); // TODO: settle on right value public static final Setting INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", (settings) -> { if 
(EXISTING_SHARDS_ALLOCATOR_SETTING.get(settings).equals("stateless") && INDEX_FAST_REFRESH_SETTING.get(settings) == false) { return STATELESS_DEFAULT_REFRESH_INTERVAL; diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersion.java b/server/src/main/java/org/elasticsearch/index/IndexVersion.java index 59fddeb24cb9..dfddc35398f7 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersion.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersion.java @@ -13,11 +13,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Assertions; +import org.elasticsearch.internal.VersionExtension; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.io.IOException; import java.lang.reflect.Field; import java.util.Collection; import java.util.Collections; @@ -53,7 +55,7 @@ * Each index version should only be used in a single merged commit (apart from BwC versions copied from {@link Version}). *

* To add a new index version, add a new constant at the bottom of the list that is one greater than the current highest version, - * ensure it has a unique id, and update the {@link #CURRENT} constant to point to the new version. + * ensure it has a unique id, and update the {@link #current()} constant to point to the new version. *

Reverting an index version

* If you revert a commit with an index version change, you must ensure there is a new index version * representing the reverted change. Do not let the index version go backwards, it must always be incremented. @@ -141,6 +143,7 @@ private static IndexVersion registerIndexVersion(int id, Version luceneVersion, public static final IndexVersion V_7_17_9 = registerIndexVersion(7_17_09_99, Version.LUCENE_8_11_1, "8044989f-77ef-4d6d-9dd8-1bdd805cef74"); public static final IndexVersion V_7_17_10 = registerIndexVersion(7_17_10_99, Version.LUCENE_8_11_1, "66b743fb-8be6-443f-8920-d8c5ed561857"); public static final IndexVersion V_7_17_11 = registerIndexVersion(7_17_11_99, Version.LUCENE_8_11_1, "f1935acc-1af9-44b0-97e9-67112d333753"); + public static final IndexVersion V_7_17_12 = registerIndexVersion(7_17_12_99, Version.LUCENE_8_11_1, "1a0719f2-96f4-4df5-b20d-62244e27d7d4"); public static final IndexVersion V_8_0_0 = registerIndexVersion(8_00_00_99, Version.LUCENE_9_0_0, "ff18a13c-1fa7-4cf7-a3b1-4fdcd9461d5b"); public static final IndexVersion V_8_0_1 = registerIndexVersion(8_00_01_99, Version.LUCENE_9_0_0, "4bd5650f-3eff-418f-a7a6-ad46b2a9c941"); public static final IndexVersion V_8_1_0 = registerIndexVersion(8_01_00_99, Version.LUCENE_9_0_0, "b4742461-ee43-4fd0-a260-29f8388b82ec"); @@ -171,23 +174,35 @@ private static IndexVersion registerIndexVersion(int id, Version luceneVersion, public static final IndexVersion V_8_8_0 = registerIndexVersion(8_08_00_99, Version.LUCENE_9_6_0, "d6ffc8d7-f6bd-469b-8495-01688c310000"); public static final IndexVersion V_8_8_1 = registerIndexVersion(8_08_01_99, Version.LUCENE_9_6_0, "a613499e-ec1a-4b0b-81d3-a766aff3c27c"); public static final IndexVersion V_8_8_2 = registerIndexVersion(8_08_02_99, Version.LUCENE_9_6_0, "9db9d888-6be8-4a58-825c-f423fd8c6b00"); + public static final IndexVersion V_8_8_3 = registerIndexVersion(8_08_03_99, Version.LUCENE_9_6_0, "e279a94a-25e8-4919-9a17-39af37b75a67"); public static final IndexVersion V_8_9_0 = 
registerIndexVersion(8_09_00_99, Version.LUCENE_9_7_0, "32f6dbab-cc24-4f5b-87b5-015a848480d9"); public static final IndexVersion V_8_10_0 = registerIndexVersion(8_10_00_99, Version.LUCENE_9_7_0, "2e107286-12ad-4c51-9a6f-f8943663b6e7"); /* * READ THE JAVADOC ABOVE BEFORE ADDING NEW INDEX VERSIONS * Detached index versions added below here. */ + private static class CurrentHolder { + private static final IndexVersion CURRENT = findCurrent(V_8_10_0); + + // finds the pluggable current version, or uses the given fallback + private static IndexVersion findCurrent(IndexVersion fallback) { + var versionExtension = VersionExtension.load(); + if (versionExtension == null) { + return fallback; + } + var version = versionExtension.getCurrentIndexVersion(); - /** - * Reference to the most recent index version. - * This should be the index version with the highest id. - */ - public static final IndexVersion CURRENT = V_8_10_0; + assert version.onOrAfter(fallback); + assert version.luceneVersion.equals(Version.LATEST) + : "IndexVersion must be upgraded to [" + + Version.LATEST + + "] is still set to [" + + version.luceneVersion + + "]"; + return version; + } + } - /** - * Reference to the earliest compatible index version to this version of the codebase. - * This should be the index version used by the first release of the previous major version. - */ public static final IndexVersion MINIMUM_COMPATIBLE = V_7_0_0; static { @@ -239,13 +254,6 @@ static NavigableMap getAllVersionIds(Class cls) { static { VERSION_IDS = getAllVersionIds(IndexVersion.class); - - assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) - : "IndexVersion must be upgraded to [" - + org.apache.lucene.util.Version.LATEST - + "] is still set to [" - + CURRENT.luceneVersion - + "]"; } static Collection getAllVersions() { @@ -298,6 +306,14 @@ public static IndexVersion max(IndexVersion version1, IndexVersion version2) { return version1.id > version2.id ? 
version1 : version2; } + /** + * Returns the most recent index version. + * This should be the index version with the highest id. + */ + public static IndexVersion current() { + return CurrentHolder.CURRENT; + } + public boolean after(IndexVersion version) { return version.id < id; } @@ -330,7 +346,7 @@ public int compareTo(IndexVersion other) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.value(toString()); + return builder.value(id); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java index 645e679f9909..4269e710e9f0 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java @@ -51,7 +51,7 @@ public PreBuiltAnalyzerProviderFactory(String name, PreBuiltCacheFactory.Caching public AnalyzerProvider get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException { IndexVersion versionCreated = indexSettings.getIndexVersionCreated(); - if (IndexVersion.CURRENT.equals(versionCreated) == false) { + if (IndexVersion.current().equals(versionCreated) == false) { return super.get(indexSettings, environment, name, settings); } else { return current; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java index d35dc131e6c4..1b5bd9ee2bb8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java @@ -278,7 +278,7 @@ protected final RuntimeField createChildRuntimeField( } final RuntimeField createRuntimeField(Factory 
scriptFactory) { - return createRuntimeField(scriptFactory, IndexVersion.CURRENT); + return createRuntimeField(scriptFactory, IndexVersion.current()); } final RuntimeField createRuntimeField(Factory scriptFactory, IndexVersion indexVersion) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ConstantFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/ConstantFieldType.java index b7a360f1cd5d..ed714e597cbb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ConstantFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ConstantFieldType.java @@ -98,6 +98,10 @@ public final Query prefixQuery( boolean caseInsensitive, SearchExecutionContext context ) { + return prefixQuery(prefix, caseInsensitive, context); + } + + public final Query prefixQuery(String prefix, boolean caseInsensitive, QueryRewriteContext context) { String pattern = prefix + "*"; if (matches(pattern, caseInsensitive, context)) { return Queries.newMatchAllQuery(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java index 3764a6c3b081..95bf70f49d7c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateScriptFieldType.java @@ -109,7 +109,7 @@ AbstractScriptFieldType createFieldType( Map meta, OnScriptError onScriptError ) { - return createFieldType(name, factory, script, meta, IndexVersion.CURRENT, onScriptError); + return createFieldType(name, factory, script, meta, IndexVersion.current(), onScriptError); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 4de474f545c4..e929af366f12 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -1047,7 +1047,7 @@ public static Parameter analyzerParam( Function initializer, Supplier defaultAnalyzer ) { - return analyzerParam(name, updateable, initializer, defaultAnalyzer, IndexVersion.CURRENT); + return analyzerParam(name, updateable, initializer, defaultAnalyzer, IndexVersion.current()); } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 63ea7a9a4a29..1d2dea3ebed9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -277,7 +277,7 @@ public static class Builder extends FieldMapper.Builder { final TextParams.Analyzers analyzers; public Builder(String name, IndexAnalyzers indexAnalyzers) { - this(name, IndexVersion.CURRENT, indexAnalyzers); + this(name, IndexVersion.current(), indexAnalyzers); } public Builder(String name, IndexVersion indexCreatedVersion, IndexAnalyzers indexAnalyzers) { diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index a2c46e4e9d6d..1a9fea929a20 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -322,7 +322,7 @@ protected QueryBuilder doCoordinatorRewrite(final CoordinatorRewriteContext coor * @return A {@link QueryBuilder} representing the rewritten query. 
*/ protected QueryBuilder doSearchRewrite(final SearchExecutionContext searchExecutionContext) throws IOException { - return this; + return doIndexMetadataRewrite(searchExecutionContext); } /** diff --git a/server/src/main/java/org/elasticsearch/index/query/DataRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/DataRewriteContext.java new file mode 100644 index 000000000000..63aedeafcd25 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/query/DataRewriteContext.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.index.query; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.xcontent.XContentParserConfiguration; + +import java.util.function.LongSupplier; + +/** + * Context object used to rewrite {@link QueryBuilder} instances into simplified version on the datanode where the request is going to be + * executed. + * + * Note: the way search requests are executed and rewritten currently on each node is that it is done by shard. So, `DataRewriteContext` + * will be used in `rewrite` per shard but before the query phase. 
+ */ +public class DataRewriteContext extends QueryRewriteContext { + public DataRewriteContext(final XContentParserConfiguration parserConfiguration, final Client client, final LongSupplier nowInMillis) { + super(parserConfiguration, client, nowInMillis); + } + + @Override + public DataRewriteContext convertToDataRewriteContext() { + return this; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java index d7e54df00ed0..06e3e41e9c4a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java @@ -66,14 +66,12 @@ public String fieldName() { } @Override - protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { - SearchExecutionContext context = queryRewriteContext.convertToSearchExecutionContext(); - if (context != null) { - if (getMappedFields(context, fieldName).isEmpty()) { - return new MatchNoneQueryBuilder(); - } + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + if (getMappedFields(context, fieldName).isEmpty()) { + return new MatchNoneQueryBuilder(); + } else { + return this; } - return super.doRewrite(queryRewriteContext); } @Override @@ -153,7 +151,7 @@ public static Query newFilter(SearchExecutionContext context, String fieldPatter return new ConstantScoreQuery(boolFilterBuilder.build()); } - private static Collection getMappedFields(SearchExecutionContext context, String fieldPattern) { + private static Collection getMappedFields(QueryRewriteContext context, String fieldPattern) { Set matchingFieldNames = context.getMatchingFieldNames(fieldPattern); if (matchingFieldNames.isEmpty()) { // might be an object field, so try matching it as an object prefix pattern diff --git 
a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java index 04421048303e..f1dc5d125955 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java @@ -156,11 +156,6 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.endObject(); } - @Override - protected QueryBuilder doSearchRewrite(SearchExecutionContext searchExecutionContext) throws IOException { - return doIndexMetadataRewrite(searchExecutionContext); - } - @Override protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { // If we're using the default keyword analyzer then we can rewrite this to a TermQueryBuilder diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java index b96085accf24..e06051154c54 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java @@ -368,26 +368,22 @@ public void doXContent(XContentBuilder builder, Params params) throws IOExceptio } @Override - protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { if (fuzziness != null || lenient) { // Term queries can be neither fuzzy nor lenient, so don't rewrite under these conditions return this; } - SearchExecutionContext sec = queryRewriteContext.convertToSearchExecutionContext(); - if (sec == null) { - return this; - } // If we're using a keyword analyzer then we can rewrite this to a TermQueryBuilder // and possibly shortcut - NamedAnalyzer configuredAnalyzer = configuredAnalyzer(sec); + 
NamedAnalyzer configuredAnalyzer = configuredAnalyzer(context); if (configuredAnalyzer != null && configuredAnalyzer.analyzer() instanceof KeywordAnalyzer) { TermQueryBuilder termQueryBuilder = new TermQueryBuilder(fieldName, value); - return termQueryBuilder.rewrite(sec); + return termQueryBuilder.rewrite(context); } return this; } - private NamedAnalyzer configuredAnalyzer(SearchExecutionContext context) { + private NamedAnalyzer configuredAnalyzer(QueryRewriteContext context) { if (analyzer != null) { return context.getIndexAnalyzers().get(analyzer); } diff --git a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java index 40d7737ebd7e..5a260f487e6b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java @@ -186,28 +186,24 @@ public String getWriteableName() { } @Override - protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { - SearchExecutionContext context = queryRewriteContext.convertToSearchExecutionContext(); - if (context != null) { - MappedFieldType fieldType = context.getFieldType(this.fieldName); - if (fieldType == null) { + protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { + MappedFieldType fieldType = context.getFieldType(this.fieldName); + if (fieldType == null) { + return new MatchNoneQueryBuilder(); + } else if (fieldType instanceof ConstantFieldType constantFieldType) { + // This logic is correct for all field types, but by only applying it to constant + // fields we also have the guarantee that it doesn't perform I/O, which is important + // since rewrites might happen on a network thread. 
+ Query query = constantFieldType.prefixQuery(value, caseInsensitive, context); + if (query instanceof MatchAllDocsQuery) { + return new MatchAllQueryBuilder(); + } else if (query instanceof MatchNoDocsQuery) { return new MatchNoneQueryBuilder(); - } else if (fieldType instanceof ConstantFieldType) { - // This logic is correct for all field types, but by only applying it to constant - // fields we also have the guarantee that it doesn't perform I/O, which is important - // since rewrites might happen on a network thread. - Query query = fieldType.prefixQuery(value, null, caseInsensitive, context); // the rewrite method doesn't matter - if (query instanceof MatchAllDocsQuery) { - return new MatchAllQueryBuilder(); - } else if (query instanceof MatchNoDocsQuery) { - return new MatchNoneQueryBuilder(); - } else { - assert false : "Constant fields must produce match-all or match-none queries, got " + query; - } + } else { + assert false : "Constant fields must produce match-all or match-none queries, got " + query; } } - - return super.doRewrite(queryRewriteContext); + return this; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index 7630f99658f9..fc6dc04faf65 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -26,9 +27,11 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; 
import java.util.Objects; +import java.util.Set; import java.util.function.BiConsumer; import java.util.function.BooleanSupplier; import java.util.function.LongSupplier; @@ -148,6 +151,13 @@ public QueryRewriteContext convertToIndexMetadataContext() { return mapperService != null ? this : null; } + /** + * Returns an instance of {@link DataRewriteContext} if available or null otherwise + */ + public DataRewriteContext convertToDataRewriteContext() { + return null; + } + /** * Returns the {@link MappedFieldType} for the provided field name. * If the field is not mapped, the behaviour depends on the index.query.parse.allow_unmapped_fields setting, which defaults to true. @@ -282,4 +292,34 @@ public boolean indexMatches(String pattern) { assert indexNameMatcher != null; return indexNameMatcher.test(pattern); } + + /** + * Returns the names of all mapped fields that match a given pattern + * + * All names returned by this method are guaranteed to resolve to a + * MappedFieldType if passed to {@link #getFieldType(String)} + * + * @param pattern the field name pattern + */ + public Set getMatchingFieldNames(String pattern) { + if (runtimeMappings.isEmpty()) { + return mappingLookup.getMatchingFieldNames(pattern); + } + Set matches = new HashSet<>(mappingLookup.getMatchingFieldNames(pattern)); + if ("*".equals(pattern)) { + matches.addAll(runtimeMappings.keySet()); + } else if (Regex.isSimpleMatchPattern(pattern) == false) { + // no wildcard + if (runtimeMappings.containsKey(pattern)) { + matches.add(pattern); + } + } else { + for (String name : runtimeMappings.keySet()) { + if (Regex.simpleMatch(pattern, name)) { + matches.add(name); + } + } + } + return matches; + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index 65fe9356a3f0..5f285326446a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ 
b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.Index; @@ -311,36 +310,6 @@ public boolean hasMappings() { return mappingLookup.hasMappings(); } - /** - * Returns the names of all mapped fields that match a given pattern - * - * All names returned by this method are guaranteed to resolve to a - * MappedFieldType if passed to {@link #getFieldType(String)} - * - * @param pattern the field name pattern - */ - public Set getMatchingFieldNames(String pattern) { - if (runtimeMappings.isEmpty()) { - return mappingLookup.getMatchingFieldNames(pattern); - } - Set matches = new HashSet<>(mappingLookup.getMatchingFieldNames(pattern)); - if ("*".equals(pattern)) { - matches.addAll(runtimeMappings.keySet()); - } else if (Regex.isSimpleMatchPattern(pattern) == false) { - // no wildcard - if (runtimeMappings.containsKey(pattern)) { - matches.add(pattern); - } - } else { - for (String name : runtimeMappings.keySet()) { - if (Regex.simpleMatch(pattern, name)) { - matches.add(name); - } - } - } - return matches; - } - /** * Returns true if the field identified by the provided name is mapped, false otherwise */ diff --git a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java index f5935aa7eff2..be66c2b87994 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermQueryBuilder.java @@ -167,11 +167,6 @@ protected void addExtraXContent(XContentBuilder builder, Params params) throws I } } - @Override - 
protected QueryBuilder doSearchRewrite(SearchExecutionContext searchExecutionContext) throws IOException { - return doIndexMetadataRewrite(searchExecutionContext); - } - @Override protected QueryBuilder doIndexMetadataRewrite(QueryRewriteContext context) throws IOException { MappedFieldType fieldType = context.getFieldType(this.fieldName); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java index c75cbb0308a8..a3f6d8a2921e 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseBackgroundSyncAction.java @@ -10,8 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; @@ -26,12 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -44,6 +39,7 @@ import java.util.Objects; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.index.seqno.RetentionLeaseSyncAction.getExceptionLogLevel; /** * Replication action responsible for 
background syncing retention leases to replicas. This action is deliberately a replication action so @@ -129,20 +125,7 @@ public void handleResponse(ReplicationResponse response) { public void handleException(TransportException e) { task.setPhase("finished"); taskManager.unregister(task); - if (ExceptionsHelper.unwrap(e, NodeClosedException.class) != null) { - // node shutting down - return; - } - if (ExceptionsHelper.unwrap( - e, - IndexNotFoundException.class, - AlreadyClosedException.class, - IndexShardClosedException.class - ) != null) { - // the index was deleted or the shard is closed - return; - } - getLogger().warn(() -> format("%s retention lease background sync failed", shardId), e); + LOGGER.log(getExceptionLogLevel(e), () -> format("%s retention lease background sync failed", shardId), e); } } ); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java index 44b5ba067421..ebfc8ff28f3f 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.ReplicationTask; import org.elasticsearch.action.support.replication.TransportWriteAction; @@ -37,6 +38,7 @@ import org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; import 
org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -146,10 +148,12 @@ public void handleException(TransportException e) { static Level getExceptionLogLevel(Exception e) { return ExceptionsHelper.unwrap( e, + NodeClosedException.class, IndexNotFoundException.class, AlreadyClosedException.class, IndexShardClosedException.class, - ShardNotInPrimaryModeException.class + ShardNotInPrimaryModeException.class, + ReplicationOperation.RetryOnPrimaryException.class ) == null ? Level.WARN : Level.DEBUG; } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 85c3c9bfc39a..2201476d0f48 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -3627,7 +3627,8 @@ public int getActiveOperationsCount() { * listener handles all exception cases internally. */ public final void syncAfterWrite(Translog.Location location, Consumer syncListener) { - assert indexShardOperationPermits.getActiveOperationsCount() != 0; + // TODO AwaitsFix https://github.com/elastic/elasticsearch/issues/97183 + // assert indexShardOperationPermits.getActiveOperationsCount() != 0; verifyNotClosed(); getEngine().asyncEnsureTranslogSynced(location, syncListener); } @@ -3749,7 +3750,7 @@ ReplicationTracker getReplicationTracker() { } /** - * Executes a scheduled refresh if necessary. Completes the listener with true if a refreshed was performed otherwise false. + * Executes a scheduled refresh if necessary. Completes the listener with true if a refresh was performed otherwise false. 
*/ public void scheduledRefresh(ActionListener listener) { ActionListener.run(listener, l -> { diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java index 3ab70a37a3c0..ca9de756ca21 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -59,7 +59,7 @@ final class ShardSplittingQuery extends Query { this.indexMetadata = indexMetadata; this.indexRouting = IndexRouting.fromIndexMetadata(indexMetadata); this.shardId = shardId; - this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetadata.getCreationVersion().indexVersion) : null; + this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetadata.getCreationVersion()) : null; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java index 67f3b463834f..a8693f8fcc75 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BufferedChecksumStreamInput.java @@ -101,7 +101,12 @@ public void reset() throws IOException { @Override public int read() throws IOException { - return readByte() & 0xFF; + int b = delegate.read(); + if (b == -1) { + return b; + } + digest.update((byte) b); + return b; } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 276749bdec50..de47fb04bf08 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -100,6 +100,7 @@ import org.elasticsearch.index.merge.MergeStats; import 
org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.CoordinatorRewriteContextProvider; +import org.elasticsearch.index.query.DataRewriteContext; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.recovery.RecoveryStats; @@ -1699,6 +1700,10 @@ public QueryRewriteContext getRewriteContext(LongSupplier nowInMillis) { return new QueryRewriteContext(parserConfig, client, nowInMillis); } + public DataRewriteContext getDataRewriteContext(LongSupplier nowInMillis) { + return new DataRewriteContext(parserConfig, client, nowInMillis); + } + public CoordinatorRewriteContextProvider getCoordinatorRewriteContextProvider(LongSupplier nowInMillis) { return new CoordinatorRewriteContextProvider(parserConfig, client, nowInMillis, clusterService::state, this::getTimestampFieldType); } @@ -1800,4 +1805,7 @@ public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { return timestampFieldMapperService.getTimestampFieldType(index); } + public IndexScopedSettings getIndexScopedSettings() { + return indexScopedSettings; + } } diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index d92b1935fd79..a81be7fb037f 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -931,10 +931,6 @@ private class FailedShardHandler implements Consumer { @Override public void accept(final IndexShard.ShardFailure shardFailure) { final ShardRouting shardRouting = shardFailure.routing(); - if (shardRouting.initializing()) { - // no need to fail the shard here during recovery, the recovery code will take care of failing it - return; - } threadPool.generic().execute(() -> { synchronized 
(IndicesClusterStateService.this) { failAndRemoveShard( diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index b3f2c60f9074..8eb3894c7c87 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -16,8 +16,10 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ChannelActionListener; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -90,6 +92,7 @@ public static class Actions { public static final String HANDOFF_PRIMARY_CONTEXT = "internal:index/shard/recovery/handoff_primary_context"; } + private final Client client; private final ThreadPool threadPool; private final TransportService transportService; @@ -101,12 +104,14 @@ public static class Actions { private final RecoveriesCollection onGoingRecoveries; public PeerRecoveryTargetService( + Client client, ThreadPool threadPool, TransportService transportService, RecoverySettings recoverySettings, ClusterService clusterService, SnapshotFilesProvider snapshotFilesProvider ) { + this.client = client; this.threadPool = threadPool; this.transportService = transportService; this.recoverySettings = recoverySettings; @@ -289,7 +294,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi assert preExistingRequest == null; assert indexShard.indexSettings().getIndexMetadata().isSearchableSnapshot() == false; 
ActionListener.run(cleanupOnly.map(v -> { - logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); + logger.trace("{} preparing unpromotable shard for recovery", recoveryTarget.shardId()); indexShard.prepareForIndexRecovery(); // Skip unnecessary intermediate stages recoveryState.setStage(RecoveryState.Stage.VERIFY_INDEX); @@ -303,6 +308,35 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi return; } + if (indexShard.routingEntry().isSearchable() == false && recoveryState.getPrimary()) { + assert preExistingRequest == null; + assert indexShard.indexSettings().getIndexMetadata().isSearchableSnapshot() == false; + try (onCompletion) { + client.execute( + StatelessPrimaryRelocationAction.INSTANCE, + new StatelessPrimaryRelocationAction.Request( + recoveryId, + indexShard.shardId(), + transportService.getLocalNode(), + indexShard.routingEntry().allocationId().getId() + ), + new ActionListener<>() { + @Override + public void onResponse(ActionResponse.Empty ignored) { + onGoingRecoveries.markRecoveryAsDone(recoveryId); + } + + @Override + public void onFailure(Exception e) { + // TODO retries? 
See RecoveryResponseHandler#handleException + onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(recoveryState, null, e), true); + } + } + ); + return; + } + } + record StartRecoveryRequestToSend(StartRecoveryRequest startRecoveryRequest, String actionName, TransportRequest requestToSend) {} final ActionListener toSendListener = cleanupOnly.map(r -> { logger.trace( diff --git a/server/src/main/java/org/elasticsearch/internal/VersionExtension.java b/server/src/main/java/org/elasticsearch/internal/VersionExtension.java index 625eeb9c88ce..8afef81455a9 100644 --- a/server/src/main/java/org/elasticsearch/internal/VersionExtension.java +++ b/server/src/main/java/org/elasticsearch/internal/VersionExtension.java @@ -9,6 +9,7 @@ package org.elasticsearch.internal; import org.elasticsearch.TransportVersion; +import org.elasticsearch.index.IndexVersion; import java.util.ServiceLoader; @@ -23,6 +24,13 @@ public interface VersionExtension { */ TransportVersion getCurrentTransportVersion(); + /** + * Returns the {@link IndexVersion} that Elasticsearch should use. + *

+ * This must be at least equal to the latest version found in {@link IndexVersion} V_* constants. + */ + IndexVersion getCurrentIndexVersion(); + /** * Loads a single VersionExtension, or returns {@code null} if none are found. */ diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 594ad6d03b5d..c3db9cc11835 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -84,7 +84,6 @@ import org.elasticsearch.common.settings.ConsistentSettingsService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.BoundTransportAddress; @@ -475,14 +474,10 @@ protected Node( // this is as early as we can validate settings at this point. 
we already pass them to ScriptModule as well as ThreadPool // so we might be late here already - final Set> settingsUpgraders = pluginsService.flatMap(Plugin::getSettingUpgraders) - .collect(Collectors.toSet()); - final SettingsModule settingsModule = new SettingsModule( settings, additionalSettings, - pluginsService.flatMap(Plugin::getSettingsFilter).toList(), - settingsUpgraders + pluginsService.flatMap(Plugin::getSettingsFilter).toList() ); // creating `NodeEnvironment` breaks the ability to rollback to 7.x on an 8.0 upgrade (`upgradeLegacyNodeFolders`) so do this @@ -743,7 +738,8 @@ protected Node( clusterModule.getIndexNameExpressionResolver(), repositoriesServiceReference::get, tracer, - clusterModule.getAllocationService() + clusterModule.getAllocationService(), + indicesService ) ).toList(); @@ -1092,6 +1088,7 @@ protected Node( b.bind(PeerRecoveryTargetService.class) .toInstance( new PeerRecoveryTargetService( + client, threadPool, transportService, recoverySettings, diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java index faa5a54a3e16..b736682b72bc 100644 --- a/server/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -18,12 +18,12 @@ import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettingProvider; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ExecutorBuilder; @@ 
-83,6 +83,7 @@ public abstract class Plugin implements Closeable { * is called, but will return the repositories service once the node is initialized. * @param tracer An interface for distributed tracing * @param allocationService A service to manage shard allocation in the cluster + * @param indicesService A service to manage indices in the cluster */ public Collection createComponents( Client client, @@ -97,7 +98,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { return Collections.emptyList(); } @@ -146,15 +148,6 @@ public List getSettingsFilter() { return Collections.emptyList(); } - /** - * Get the setting upgraders provided by this plugin. - * - * @return the settings upgraders - */ - public List> getSettingUpgraders() { - return Collections.emptyList(); - } - /** * Provides a function to modify index template meta data on startup. *

diff --git a/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java b/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java index a61f0fc6721a..ff6d21f3039a 100644 --- a/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java @@ -8,10 +8,10 @@ package org.elasticsearch.plugins; -import org.elasticsearch.Version; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.repositories.Repository; import org.elasticsearch.snapshots.Snapshot; @@ -67,7 +67,7 @@ default Map getInternalRepositories( * * returns null if no check is provided */ - default BiConsumer addPreRestoreVersionCheck() { + default BiConsumer addPreRestoreVersionCheck() { return null; } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java index 370c1a3a2b97..621bd98e3f29 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java @@ -8,11 +8,11 @@ package org.elasticsearch.repositories; -import org.elasticsearch.Version; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.fs.FsRepository; @@ -85,9 +85,9 @@ public RepositoriesModule( } } - List> preRestoreChecks = new ArrayList<>(); + List> preRestoreChecks = new 
ArrayList<>(); for (RepositoryPlugin repoPlugin : repoPlugins) { - BiConsumer preRestoreCheck = repoPlugin.addPreRestoreVersionCheck(); + BiConsumer preRestoreCheck = repoPlugin.addPreRestoreVersionCheck(); if (preRestoreCheck != null) { preRestoreChecks.add(preRestoreCheck); } @@ -100,7 +100,7 @@ public RepositoriesModule( "the snapshot was created with Elasticsearch version [" + version + "] which is below the current versions minimum index compatibility version [" - + Version.CURRENT.minimumIndexCompatibilityVersion() + + IndexVersion.MINIMUM_COMPATIBLE + "]" ); } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 531f430f01e0..dd0eed50e043 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; @@ -43,6 +42,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.threadpool.ThreadPool; @@ -100,7 +100,7 @@ public class RepositoriesService extends AbstractLifecycleComponent implements C private volatile Map repositories = Collections.emptyMap(); private final RepositoriesStatsArchive repositoriesStatsArchive; - private final List> preRestoreChecks; + private final List> preRestoreChecks; public RepositoriesService( Settings settings, @@ -109,7 
+109,7 @@ public RepositoriesService( Map typesRegistry, Map internalTypesRegistry, ThreadPool threadPool, - List> preRestoreChecks + List> preRestoreChecks ) { this.typesRegistry = typesRegistry; this.internalTypesRegistry = internalTypesRegistry; @@ -903,7 +903,7 @@ private static RepositoryConflictException newRepositoryConflictException(String ); } - public List> getPreRestoreVersionChecks() { + public List> getPreRestoreVersionChecks() { return preRestoreChecks; } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index 089b5a6e639b..1f12ac8b4346 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotState; @@ -309,7 +310,7 @@ public SnapshotState getSnapshotState(final SnapshotId snapshotId) { * Returns the {@link Version} for the given snapshot or {@code null} if unknown. 
*/ @Nullable - public Version getVersion(SnapshotId snapshotId) { + public IndexVersion getVersion(SnapshotId snapshotId) { return snapshotsDetails.getOrDefault(snapshotId.getUUID(), SnapshotDetails.EMPTY).getVersion(); } @@ -731,9 +732,13 @@ public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final } builder.endObject(); } - final Version version = snapshotDetails.getVersion(); + final IndexVersion version = snapshotDetails.getVersion(); if (version != null) { - builder.field(VERSION, version.toString()); + if (version.before(IndexVersion.V_8_9_0)) { + builder.field(VERSION, Version.fromId(version.id()).toString()); + } else { + builder.field(VERSION, version.id()); + } } if (snapshotDetails.getStartTimeMillis() != -1) { @@ -903,13 +908,13 @@ private static void parseSnapshots( String uuid = null; SnapshotState state = null; Map metaGenerations = null; - Version version = null; + IndexVersion version = null; long startTimeMillis = -1; long endTimeMillis = -1; String slmPolicy = null; while (parser.nextToken() != XContentParser.Token.END_OBJECT) { String currentFieldName = parser.currentName(); - parser.nextToken(); + var token = parser.nextToken(); switch (currentFieldName) { case NAME -> name = parser.text(); case UUID -> uuid = parser.text(); @@ -918,7 +923,13 @@ private static void parseSnapshots( HashMap::new, p -> stringDeduplicator.computeIfAbsent(p.text(), Function.identity()) ); - case VERSION -> version = Version.fromString(parser.text()); + case VERSION -> { + switch (token) { + case VALUE_STRING -> version = IndexVersion.fromId(Version.fromString(parser.text()).id); // 8.9.0 or before + case VALUE_NUMBER -> version = IndexVersion.fromId(parser.intValue()); // separated index version + default -> throw new IllegalStateException("Unexpected token type " + token); + } + } case START_TIME_MILLIS -> { assert startTimeMillis == -1; startTimeMillis = parser.longValue(); @@ -1051,7 +1062,7 @@ public static class SnapshotDetails { private 
final SnapshotState snapshotState; @Nullable // may be omitted if pre-7.6 nodes were involved somewhere - private final Version version; + private final IndexVersion version; // May be -1 if unknown, which happens if the snapshot was taken before 7.14 and hasn't been updated yet private final long startTimeMillis; @@ -1066,7 +1077,7 @@ public static class SnapshotDetails { public SnapshotDetails( @Nullable SnapshotState snapshotState, - @Nullable Version version, + @Nullable IndexVersion version, long startTimeMillis, long endTimeMillis, @Nullable String slmPolicy @@ -1084,7 +1095,7 @@ public SnapshotState getSnapshotState() { } @Nullable - public Version getVersion() { + public IndexVersion getVersion() { return version; } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index d94f1495b4be..69df0589c3b7 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -77,6 +77,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; import org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException; @@ -1373,7 +1374,7 @@ public void finalizeSnapshot(final FinalizeSnapshotContext finalizeSnapshotConte final String slmPolicy = slmPolicy(snapshotInfo); final SnapshotDetails snapshotDetails = new SnapshotDetails( snapshotInfo.state(), - Version.CURRENT, + IndexVersion.current(), snapshotInfo.startTime(), snapshotInfo.endTime(), slmPolicy diff --git a/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java 
b/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java index 03bd53c0a4c3..e4868a3937e9 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java @@ -8,10 +8,11 @@ package org.elasticsearch.reservedstate.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.admin.cluster.settings.TransportClusterUpdateSettingsAction; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.SettingsUpdater; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.XContentParser; @@ -31,6 +32,8 @@ */ public class ReservedClusterSettingsAction implements ReservedClusterStateHandler> { + private static final Logger logger = LogManager.getLogger(ReservedClusterSettingsAction.class); + public static final String NAME = "cluster_settings"; private final ClusterSettings clusterSettings; @@ -73,12 +76,13 @@ public TransformState transform(Object input, TransformState prevState) { validate(request); } - ClusterState state = prevState.state(); - - TransportClusterUpdateSettingsAction.ClusterUpdateSettingsTask updateSettingsTask = - new TransportClusterUpdateSettingsAction.ClusterUpdateSettingsTask(clusterSettings, request); + final var state = new SettingsUpdater(clusterSettings).updateSettings( + prevState.state(), + request.transientSettings(), + request.persistentSettings(), + logger + ); - state = updateSettingsTask.execute(state); Set currentKeys = request.persistentSettings() .keySet() .stream() diff --git 
a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 3c7a2807f06f..ac2d7aeedf3d 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -96,15 +96,14 @@ public class RestController implements HttpServerTransport.Dispatcher { private final UsageService usageService; private final Tracer tracer; // If true, the ServerlessScope annotations will be enforced - private final boolean serverlessEnabled; + private final ServerlessApiProtections apiProtections; public RestController( UnaryOperator handlerWrapper, NodeClient client, CircuitBreakerService circuitBreakerService, UsageService usageService, - Tracer tracer, - boolean serverlessEnabled + Tracer tracer ) { this.usageService = usageService; this.tracer = tracer; @@ -115,7 +114,11 @@ public RestController( this.client = client; this.circuitBreakerService = circuitBreakerService; registerHandlerNoWrap(RestRequest.Method.GET, "/favicon.ico", RestApiVersion.current(), new RestFavIconHandler()); - this.serverlessEnabled = serverlessEnabled; + this.apiProtections = new ServerlessApiProtections(false); + } + + public ServerlessApiProtections getApiProtections() { + return apiProtections; } /** @@ -374,7 +377,7 @@ private void dispatchRequest(RestRequest request, RestChannel channel, RestHandl } } RestChannel responseChannel = channel; - if (serverlessEnabled) { + if (apiProtections.isEnabled()) { Scope scope = handler.getServerlessScope(); if (scope == null) { handleServerlessRequestToProtectedResource(request.uri(), request.method(), responseChannel); diff --git a/server/src/main/java/org/elasticsearch/rest/ServerlessApiProtections.java b/server/src/main/java/org/elasticsearch/rest/ServerlessApiProtections.java new file mode 100644 index 000000000000..d06eb4db62e9 --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/rest/ServerlessApiProtections.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.rest; + +public class ServerlessApiProtections { + + private volatile boolean enabled; + + public ServerlessApiProtections(boolean enabled) { + this.enabled = enabled; + } + + public boolean isEnabled() { + return enabled; + } + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java index 86a8295e872a..5e8dac7fef11 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.tasks.TaskId; import java.io.IOException; @@ -21,8 +22,10 @@ import java.util.function.Supplier; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.Scope.INTERNAL; import static org.elasticsearch.rest.action.admin.cluster.RestListTasksAction.listTasksResponseListener; +@ServerlessScope(INTERNAL) public class RestCancelTasksAction extends BaseRestHandler { private final Supplier nodesInCluster; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java index a2f948b448e9..0b7f9f3907ee 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetTaskAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.tasks.TaskId; @@ -20,7 +21,9 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.Scope.PUBLIC; +@ServerlessScope(PUBLIC) public class RestGetTaskAction extends BaseRestHandler { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java index cbf8baa9a2ea..3fa9a104ca71 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.rest.action.RestChunkedToXContentListener; import org.elasticsearch.tasks.TaskId; @@ -27,7 +28,9 @@ import java.util.function.Supplier; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.Scope.INTERNAL; +@ServerlessScope(INTERNAL) public class RestListTasksAction extends BaseRestHandler { private final Supplier nodesInCluster; diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java index 4a875454ad54..96b7bf0100fd 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java @@ -15,13 +15,16 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.Scope.PUBLIC; +@ServerlessScope(PUBLIC) public class RestAddIndexBlockAction extends BaseRestHandler { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java index 2b0ce6bf7cf7..fb7ac3dbf61b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -21,7 +22,9 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.Scope.INTERNAL; +@ServerlessScope(INTERNAL) public class RestFlushAction extends BaseRestHandler { @Override diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java index fde5d5018716..e293cf86d455 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java @@ -16,6 +16,8 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.rest.action.RestChunkedToXContentListener; @@ -24,6 +26,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; +@ServerlessScope(Scope.INTERNAL) public class RestIndicesSegmentsAction extends BaseRestHandler { private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestIndicesSegmentsAction.class); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java index 00900ebd6ee8..12c3a28d7978 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -55,6 +55,7 @@ public final RestChannelConsumer prepareRequest(final RestRequest request, final return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel)); } + // no @ServerlessScope on purpose, not available public static class RestShrinkIndexAction extends RestResizeHandler { @Override @@ -74,6 +75,7 @@ protected ResizeType getResizeType() { } + // no @ServerlessScope on purpose, not available 
public static class RestSplitIndexAction extends RestResizeHandler { @Override @@ -93,6 +95,7 @@ protected ResizeType getResizeType() { } + // no @ServerlessScope on purpose, not available public static class RestCloneIndexAction extends RestResizeHandler { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java index 207a9765cb96..4e32a3635872 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java @@ -15,6 +15,8 @@ import org.elasticsearch.common.Table; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.Scope; +import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestResponseListener; import java.util.List; @@ -24,6 +26,7 @@ /** * Cat API class to display information about snapshot repositories */ +@ServerlessScope(Scope.INTERNAL) public class RestRepositoriesAction extends AbstractCatAction { @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 795228fabedc..643aa6cff272 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -713,33 +713,37 @@ public void executeQueryPhase(QuerySearchRequest request, SearchShardTask task, final ReaderContext readerContext = findReaderContext(request.contextId(), request.shardSearchRequest()); final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(request.shardSearchRequest()); final Releasable markAsUsed = readerContext.markAsUsed(getKeepAlive(shardSearchRequest)); - runAsync(getExecutor(readerContext.indexShard()), () -> { - 
readerContext.setAggregatedDfs(request.dfs()); - try ( - SearchContext searchContext = createContext(readerContext, shardSearchRequest, task, ResultsType.QUERY, true); - SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext) - ) { - searchContext.searcher().setAggregatedDfs(request.dfs()); - QueryPhase.execute(searchContext); - if (searchContext.queryResult().hasSearchContext() == false && readerContext.singleSession()) { - // no hits, we can release the context since there will be no fetch phase - freeReaderContext(readerContext.id()); + rewriteAndFetchShardRequest(readerContext.indexShard(), shardSearchRequest, listener.delegateFailure((l, rewritten) -> { + // fork the execution in the search thread pool + runAsync(getExecutor(readerContext.indexShard()), () -> { + readerContext.setAggregatedDfs(request.dfs()); + try ( + SearchContext searchContext = createContext(readerContext, shardSearchRequest, task, ResultsType.QUERY, true); + SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext) + ) { + searchContext.searcher().setAggregatedDfs(request.dfs()); + QueryPhase.execute(searchContext); + if (searchContext.queryResult().hasSearchContext() == false && readerContext.singleSession()) { + // no hits, we can release the context since there will be no fetch phase + freeReaderContext(readerContext.id()); + } + executor.success(); + // Pass the rescoreDocIds to the queryResult to send them the coordinating node + // and receive them back in the fetch phase. + // We also pass the rescoreDocIds to the LegacyReaderContext in case the search state needs to stay in the data node. 
+ final RescoreDocIds rescoreDocIds = searchContext.rescoreDocIds(); + searchContext.queryResult().setRescoreDocIds(rescoreDocIds); + readerContext.setRescoreDocIds(rescoreDocIds); + searchContext.queryResult().incRef(); + return searchContext.queryResult(); + } catch (Exception e) { + assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); + logger.trace("Query phase failed", e); + // we handle the failure in the failure listener below + throw e; } - executor.success(); - // Pass the rescoreDocIds to the queryResult to send them the coordinating node and receive them back in the fetch phase. - // We also pass the rescoreDocIds to the LegacyReaderContext in case the search state needs to stay in the data node. - final RescoreDocIds rescoreDocIds = searchContext.rescoreDocIds(); - searchContext.queryResult().setRescoreDocIds(rescoreDocIds); - readerContext.setRescoreDocIds(rescoreDocIds); - searchContext.queryResult().incRef(); - return searchContext.queryResult(); - } catch (Exception e) { - assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); - logger.trace("Query phase failed", e); - // we handle the failure in the failure listener below - throw e; - } - }, wrapFailureListener(listener, readerContext, markAsUsed)); + }, wrapFailureListener(l, readerContext, markAsUsed)); + })); } private Executor getExecutor(IndexShard indexShard) { @@ -1662,10 +1666,10 @@ private void rewriteAndFetchShardRequest(IndexShard shard, ShardSearchRequest re shard.ensureShardSearchActive(b -> l.onResponse(request)); } }); - // we also do rewrite on the coordinating node (TransportSearchService) but we also need to do it here for BWC as well as - // AliasFilters that might need to be rewritten. 
These are edge-cases but we are every efficient doing the rewrite here so it's not - // adding a lot of overhead - Rewriteable.rewriteAndFetch(request.getRewriteable(), indicesService.getRewriteContext(request::nowInMillis), actionListener); + // we also do rewrite on the coordinating node (TransportSearchService) but we also need to do it here. + // AliasFilters and other things may need to be rewritten on the data node, but not per individual shard. + // These are uncommon-cases but we are very efficient doing the rewrite here. + Rewriteable.rewriteAndFetch(request.getRewriteable(), indicesService.getDataRewriteContext(request::nowInMillis), actionListener); } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index 7629f3893d33..5f16a953f802 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -451,8 +451,15 @@ public String getDescription() { MappedFieldType fieldType = fieldType(); if (fieldType != null) { - return "Field [" + fieldType.name() + "] of type [" + fieldType.typeName() + "]"; + String typeName = fieldType.typeName(); + String valuesSourceTypeName = valuesSourceType.typeName(); + if (valuesSourceType instanceof TimeSeriesValuesSourceType) { + return "Field [" + fieldType.name() + "] of type [" + typeName + "][" + valuesSourceTypeName + "]"; + } else { + // Avoid repeated names. Currently only time series values source types have a different behaviour/validation. 
+ return "Field [" + fieldType.name() + "] of type [" + typeName + "]"; + } } - return "unmapped field"; + return "unmapped field with value source type [" + valuesSourceType.typeName() + "]"; } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 4b941a8eb251..016d208d591e 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -981,7 +981,7 @@ static void validateSnapshotRestorable( RestoreSnapshotRequest request, RepositoryMetadata repository, SnapshotInfo snapshotInfo, - List> preRestoreVersionChecks + List> preRestoreVersionChecks ) { if (snapshotInfo.state().restorable() == false) { throw new SnapshotRestoreException( @@ -989,7 +989,7 @@ static void validateSnapshotRestorable( "unsupported snapshot state [" + snapshotInfo.state() + "]" ); } - if (Version.CURRENT.before(snapshotInfo.version())) { + if (IndexVersion.current().before(snapshotInfo.version())) { throw new SnapshotRestoreException( new Snapshot(repository.name(), snapshotInfo.snapshotId()), "the snapshot was created with Elasticsearch version [" @@ -1290,15 +1290,12 @@ public ClusterState execute(ClusterState currentState) { request.indexSettings(), request.ignoreIndexSettings() ); - if (snapshotIndexMetadata.getCompatibilityVersion().before(minIndexCompatibilityVersion.toVersion())) { + if (snapshotIndexMetadata.getCompatibilityVersion().before(minIndexCompatibilityVersion)) { // adapt index metadata so that it can be understood by current version snapshotIndexMetadata = convertLegacyIndex(snapshotIndexMetadata, currentState, indicesService); } try { - snapshotIndexMetadata = indexMetadataVerifier.verifyIndexMetadata( - snapshotIndexMetadata, - minIndexCompatibilityVersion.toVersion() - ); + snapshotIndexMetadata = indexMetadataVerifier.verifyIndexMetadata(snapshotIndexMetadata, 
minIndexCompatibilityVersion); } catch (Exception ex) { throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be upgraded", ex); } @@ -1597,7 +1594,7 @@ private static IndexMetadata convertLegacyIndex( ClusterState clusterState, IndicesService indicesService ) { - if (snapshotIndexMetadata.getCreationVersion().before(Version.fromString("5.0.0"))) { + if (snapshotIndexMetadata.getCreationVersion().before(IndexVersion.fromId(5000099))) { throw new IllegalArgumentException("can't restore an index created before version 5.0.0"); } IndexMetadata.Builder convertedIndexMetadataBuilder = IndexMetadata.builder(snapshotIndexMetadata); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index c16ddd831f78..32cd9b8b7446 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.snapshots; -import org.elasticsearch.Version; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -20,6 +19,7 @@ import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.RepositoryShardId; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -188,7 +188,7 @@ public SnapshotInfo build() { } SnapshotState snapshotState = state == null ? null : SnapshotState.valueOf(state); - Version version = this.version == -1 ? Version.CURRENT : Version.fromId(this.version); + IndexVersion version = this.version == -1 ? 
IndexVersion.current() : IndexVersion.fromId(this.version); int totalShards = shardStatsBuilder == null ? 0 : shardStatsBuilder.getTotalShards(); int successfulShards = shardStatsBuilder == null ? 0 : shardStatsBuilder.getSuccessfulShards(); @@ -313,7 +313,7 @@ int getSuccessfulShards() { private final Map userMetadata; @Nullable - private final Version version; + private final IndexVersion version; private final List shardFailures; @@ -350,7 +350,7 @@ public SnapshotInfo( List indices, List dataStreams, List featureStates, - Version version, + IndexVersion version, SnapshotState state ) { this( @@ -389,7 +389,7 @@ public static SnapshotInfo inProgress(SnapshotsInProgress.Entry entry) { entry.dataStreams(), entry.featureStates(), null, - Version.CURRENT, + IndexVersion.current(), entry.startTime(), 0L, totalShards, @@ -422,7 +422,7 @@ public SnapshotInfo( dataStreams, featureStates, reason, - Version.CURRENT, + IndexVersion.current(), startTime, endTime, totalShards, @@ -441,7 +441,7 @@ public SnapshotInfo( List dataStreams, List featureStates, String reason, - Version version, + IndexVersion version, long startTime, long endTime, int totalShards, @@ -510,7 +510,7 @@ public static SnapshotInfo readFrom(final StreamInput in) throws IOException { final int totalShards = in.readVInt(); final int successfulShards = in.readVInt(); final List shardFailures = in.readImmutableList(SnapshotShardFailure::new); - final Version version = in.readBoolean() ? Version.readVersion(in) : null; + final IndexVersion version = in.readBoolean() ? 
IndexVersion.readVersion(in) : null; final Boolean includeGlobalState = in.readOptionalBoolean(); final Map userMetadata = in.readMap(); final List dataStreams = in.readImmutableStringList(); @@ -666,7 +666,7 @@ public List shardFailures() { * @return version of elasticsearch that the snapshot was created with */ @Nullable - public Version version() { + public IndexVersion version() { return version; } @@ -769,7 +769,7 @@ public XContentBuilder toXContentExternal(final XContentBuilder builder, final T } if (version != null) { - builder.field(VERSION_ID, version.id); + builder.field(VERSION_ID, version.id()); builder.field(VERSION, version.toString()); } @@ -848,7 +848,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final ToXConten builder.field(NAME, snapshotId.getName()); builder.field(UUID, snapshotId.getUUID()); assert version != null : "version must always be known when writing a snapshot metadata blob"; - builder.field(VERSION_ID, version.id); + builder.field(VERSION_ID, version.id()); builder.startArray(INDICES); for (String index : indices) { builder.value(index); @@ -903,7 +903,7 @@ public XContentBuilder toXContent(final XContentBuilder builder, final ToXConten public static SnapshotInfo fromXContentInternal(final String repoName, final XContentParser parser) throws IOException { String name = null; String uuid = null; - Version version = Version.CURRENT; + IndexVersion version = IndexVersion.current(); SnapshotState state = SnapshotState.IN_PROGRESS; String reason = null; List indices = Collections.emptyList(); @@ -954,7 +954,7 @@ public static SnapshotInfo fromXContentInternal(final String repoName, final XCo successfulShards = parser.intValue(); break; case VERSION_ID: - version = Version.fromId(parser.intValue()); + version = IndexVersion.fromId(parser.intValue()); break; case INCLUDE_GLOBAL_STATE: includeGlobalState = parser.booleanValue(); @@ -1035,7 +1035,7 @@ public void writeTo(final StreamOutput out) throws IOException { 
out.writeList(shardFailures); if (version != null) { out.writeBoolean(true); - Version.writeVersion(version, out); + IndexVersion.writeVersion(version, out); } else { out.writeBoolean(false); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 69db481d3d82..5507d94ae7df 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -72,6 +72,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.SystemDataStreamDescriptor; import org.elasticsearch.indices.SystemIndices; @@ -2168,7 +2169,7 @@ public static Version minCompatibleVersion( for (SnapshotId snapshotId : snapshotIds.stream() .filter(excluded == null ? sn -> true : Predicate.not(excluded::contains)) .toList()) { - final Version known = repositoryData.getVersion(snapshotId); + final IndexVersion known = repositoryData.getVersion(snapshotId); // If we don't have the version cached in the repository data yet we load it from the snapshot info blobs if (known == null) { assert repositoryData.shardGenerations().totalShards() == 0 @@ -2179,7 +2180,7 @@ public static Version minCompatibleVersion( + "]"; return OLD_SNAPSHOT_FORMAT; } else { - minCompatVersion = minCompatVersion.before(known) ? minCompatVersion : known; + minCompatVersion = minCompatVersion.before(known.toVersion()) ? 
minCompatVersion : known.toVersion(); } } return minCompatVersion; diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index eec69f24e5ed..d802b2502936 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -302,7 +302,7 @@ public void onFailure(Exception e) { new VersionInformation( Version.CURRENT.minimumCompatibilityVersion(), IndexVersion.MINIMUM_COMPATIBLE, - IndexVersion.CURRENT + IndexVersion.current() ) ); diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index ebcd759e14e3..4a9723690460 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -495,7 +495,7 @@ private static DiscoveryNode resolveSeedNode(String clusterAlias, String address var seedVersion = new VersionInformation( Version.CURRENT.minimumCompatibilityVersion(), IndexVersion.MINIMUM_COMPATIBLE, - IndexVersion.CURRENT + IndexVersion.current() ); if (proxyAddress == null || proxyAddress.isEmpty()) { TransportAddress transportAddress = new TransportAddress(parseConfiguredAddress(address)); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java index 2a6bd9fb32f1..318b1f5b5e6f 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java @@ -12,10 +12,9 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.AsyncBiFunction; import org.elasticsearch.common.bytes.BytesReference; -import 
org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.ThreadPool; @@ -53,15 +52,13 @@ final class TransportKeepAlive implements Closeable { private final CounterMetric successfulPings = new CounterMetric(); private final CounterMetric failedPings = new CounterMetric(); private final ConcurrentMap pingIntervals = ConcurrentCollections.newConcurrentMap(); - private final Lifecycle lifecycle = new Lifecycle(); private final ThreadPool threadPool; private final AsyncBiFunction pingSender; + private volatile boolean isClosed; TransportKeepAlive(ThreadPool threadPool, AsyncBiFunction pingSender) { this.threadPool = threadPool; this.pingSender = pingSender; - - this.lifecycle.moveToStarted(); } void registerNodeConnection(List nodeChannels, ConnectionProfile connectionProfile) { @@ -124,13 +121,10 @@ public void onFailure(Exception e) { @Override public void close() { - synchronized (lifecycle) { - lifecycle.moveToStopped(); - lifecycle.moveToClosed(); - } + isClosed = true; } - private class ScheduledPing extends AbstractLifecycleRunnable { + private class ScheduledPing extends AbstractRunnable { private final TimeValue pingInterval; @@ -140,7 +134,6 @@ private class ScheduledPing extends AbstractLifecycleRunnable { private volatile long lastPingRelativeMillis; private ScheduledPing(TimeValue pingInterval) { - super(lifecycle, logger); this.pingInterval = pingInterval; this.lastPingRelativeMillis = threadPool.relativeTimeInMillis(); } @@ -160,7 +153,11 @@ void removeChannel(TcpChannel channel) { } @Override - protected void doRunInLifecycle() { + protected void doRun() throws 
Exception { + if (isClosed) { + return; + } + for (TcpChannel channel : channels) { // In the future it is possible that we may want to kill a channel if we have not read from // the channel since the last ping. However, this will need to be backwards compatible with @@ -173,7 +170,11 @@ protected void doRunInLifecycle() { } @Override - protected void onAfterInLifecycle() { + public void onAfter() { + if (isClosed) { + return; + } + threadPool.scheduleUnlessShuttingDown(pingInterval, ThreadPool.Names.GENERIC, this); } diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java index b7b5829a9c43..a67ca691eb8f 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java @@ -55,7 +55,7 @@ import java.util.function.Consumer; import java.util.stream.Collectors; -import static org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION; +import static org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_INDEX_VERSION; import static org.elasticsearch.cluster.metadata.IndexMetadata.State.CLOSE; import static org.elasticsearch.core.Strings.format; @@ -359,7 +359,7 @@ private static boolean needsToBeMigrated(IndexMetadata indexMetadata) { if (indexMetadata == null) { return false; } - return indexMetadata.isSystem() && indexMetadata.getCreationVersion().before(NO_UPGRADE_REQUIRED_VERSION); + return indexMetadata.isSystem() && indexMetadata.getCreationVersion().before(NO_UPGRADE_REQUIRED_INDEX_VERSION); } private void migrateSingleIndex(ClusterState clusterState, Consumer listener) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java index b7242c4ffcee..1488b6ac519f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java @@ -8,8 +8,8 @@ package org.elasticsearch.action.admin.cluster.migration; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.AbstractWireSerializingTestCase; import java.util.Collections; @@ -89,7 +89,7 @@ public void testUpgradeStatusCominations() { private static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus createFeatureStatus() { return new GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus( randomAlphaOfLengthBetween(3, 20), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), IndexVersion.MINIMUM_COMPATIBLE), randomFrom(org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.values()), randomList(4, GetFeatureUpgradeStatusResponseTests::getIndexInfo) ); @@ -98,7 +98,7 @@ private static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus createFeatur private static GetFeatureUpgradeStatusResponse.IndexInfo getIndexInfo() { return new GetFeatureUpgradeStatusResponse.IndexInfo( randomAlphaOfLengthBetween(3, 20), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), IndexVersion.MINIMUM_COMPATIBLE), null ); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java index cb6f711f6b1e..a52f0cfd080e 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndexDescriptorUtils; import org.elasticsearch.indices.SystemIndices; @@ -24,13 +25,14 @@ import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.MIGRATION_NEEDED; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; public class TransportGetFeatureUpgradeStatusActionTests extends ESTestCase { public static String TEST_SYSTEM_INDEX_PATTERN = ".test*"; + private static final IndexVersion TEST_OLD_VERSION = IndexVersion.fromId(6000099); private static final ClusterState CLUSTER_STATE = getClusterState(); private static final SystemIndices.Feature FEATURE = getFeature(); - private static final Version TEST_OLD_VERSION = Version.fromString("6.0.0"); public void testGetFeatureStatus() { GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus status = TransportGetFeatureUpgradeStatusAction.getFeatureUpgradeStatus( @@ -41,7 +43,7 @@ public void testGetFeatureStatus() { assertThat(status.getUpgradeStatus(), equalTo(MIGRATION_NEEDED)); assertThat(status.getFeatureName(), equalTo("test-feature")); assertThat(status.getMinimumIndexVersion(), equalTo(TEST_OLD_VERSION)); - assertThat(status.getIndexVersions().size(), equalTo(2)); // additional testing below + assertThat(status.getIndexVersions(), hasSize(2)); // additional testing below } public void testGetIndexInfos() { @@ -50,11 +52,11 @@ public void testGetIndexInfos() { FEATURE ); - 
assertThat(versions.size(), equalTo(2)); + assertThat(versions, hasSize(2)); { GetFeatureUpgradeStatusResponse.IndexInfo version = versions.get(0); - assertThat(version.getVersion(), equalTo(Version.CURRENT)); + assertThat(version.getVersion(), equalTo(IndexVersion.current())); assertThat(version.getIndexName(), equalTo(".test-index-1")); } { @@ -77,7 +79,7 @@ private static SystemIndices.Feature getFeature() { private static ClusterState getClusterState() { IndexMetadata indexMetadata1 = IndexMetadata.builder(".test-index-1") - .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .settings(Settings.builder().put("index.version.created", IndexVersion.current().id()).build()) .numberOfShards(1) .numberOfReplicas(0) .build(); @@ -86,7 +88,7 @@ private static ClusterState getClusterState() { assert Version.CURRENT.major < 9; IndexMetadata indexMetadata2 = IndexMetadata.builder(".test-index-2") - .settings(Settings.builder().put("index.version.created", Version.fromString("6.0.0")).build()) + .settings(Settings.builder().put("index.version.created", TEST_OLD_VERSION.id()).build()) .numberOfShards(1) .numberOfReplicas(0) .build(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index 4a1ac0b1afaa..f5302554cec7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -122,8 +122,8 @@ public void testToXContentWithDeprecatedClusterState() { "voting_only" ], "version": "%s", - "minIndexVersion": "%s", - "maxIndexVersion": "%s" + "min_index_version": %s, + "max_index_version": %s } }, "transport_versions": [ @@ -198,7 +198,7 @@ public void testToXContentWithDeprecatedClusterState() { 
clusterState.getNodes().get("node0").getEphemeralId(), Version.CURRENT, IndexVersion.MINIMUM_COMPATIBLE, - IndexVersion.CURRENT, + IndexVersion.current(), Version.CURRENT.id ), """ diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java index 02781a23c97f..cbc7bd2983fe 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; @@ -28,7 +29,6 @@ import java.nio.file.Path; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -54,7 +54,7 @@ protected VersionStats mutateInstance(VersionStats instance) { return new VersionStats(instance.versionStats().stream().map(svs -> { return switch (randomIntBetween(1, 4)) { case 1 -> new VersionStats.SingleVersionStats( - Version.V_7_3_0, + IndexVersion.V_7_3_0, svs.indexCount, svs.primaryShardCount, svs.totalPrimaryByteCount @@ -93,8 +93,8 @@ public void testCreation() { .build(); stats = VersionStats.of(metadata, Collections.emptyList()); assertThat(stats.versionStats().size(), equalTo(2)); - VersionStats.SingleVersionStats s1 = new VersionStats.SingleVersionStats(Version.CURRENT, 2, 7, 0); - VersionStats.SingleVersionStats s2 = new VersionStats.SingleVersionStats(Version.V_7_0_0, 1, 2, 0); + VersionStats.SingleVersionStats s1 = new VersionStats.SingleVersionStats(IndexVersion.current(), 2, 7, 0); + VersionStats.SingleVersionStats s2 = 
new VersionStats.SingleVersionStats(IndexVersion.V_7_0_0, 1, 2, 0); assertThat(stats.versionStats(), containsInAnyOrder(s1, s2)); ShardId shardId = new ShardId("bar", "uuid", 0); @@ -132,8 +132,8 @@ public void testCreation() { stats = VersionStats.of(metadata, Collections.singletonList(nodeResponse)); assertThat(stats.versionStats().size(), equalTo(2)); - s1 = new VersionStats.SingleVersionStats(Version.CURRENT, 2, 7, 100); - s2 = new VersionStats.SingleVersionStats(Version.V_7_0_0, 1, 2, 0); + s1 = new VersionStats.SingleVersionStats(IndexVersion.current(), 2, 7, 100); + s2 = new VersionStats.SingleVersionStats(IndexVersion.V_7_0_0, 1, 2, 0); assertThat(stats.versionStats(), containsInAnyOrder(s1, s2)); } @@ -142,9 +142,9 @@ private static IndexMetadata indexMeta(String name, Version version, int primary } public static VersionStats randomInstance() { - List versions = Arrays.asList(Version.CURRENT, Version.V_7_0_0, Version.V_7_1_0, Version.V_7_2_0); + List versions = List.of(IndexVersion.current(), IndexVersion.V_7_0_0, IndexVersion.V_7_1_0, IndexVersion.V_7_2_0); List stats = new ArrayList<>(); - for (Version v : versions) { + for (IndexVersion v : versions) { VersionStats.SingleVersionStats s = new VersionStats.SingleVersionStats( v, randomIntBetween(10, 20), diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java index b8dbaf80bd7d..ef23ad2b8ee6 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexActionTests.java @@ -53,7 +53,7 @@ public class GetIndexActionTests extends ESSingleNodeTestCase { public void setUp() throws Exception { super.setUp(); - settingsFilter = new SettingsModule(Settings.EMPTY, emptyList(), emptyList(), emptySet()).getSettingsFilter(); + settingsFilter = new 
SettingsModule(Settings.EMPTY, emptyList(), emptyList()).getSettingsFilter(); threadPool = new TestThreadPool("GetIndexActionTests"); clusterService = getInstanceFromNode(ClusterService.class); indicesService = getInstanceFromNode(IndicesService.class); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java index 8d9665dff2b6..aaf709ef7606 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java @@ -9,10 +9,10 @@ package org.elasticsearch.action.admin.indices.resolve; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ClusterSettings; @@ -51,7 +51,7 @@ public void testCCSCompatibilityCheck() throws Exception { try { TransportService transportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java index 533a8bfe759c..c31106a30ecf 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsActionTests.java @@ -36,7 +36,6 @@ import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; -import static java.util.Collections.emptySet; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; public class GetSettingsActionTests extends ESTestCase { @@ -78,7 +77,7 @@ protected void masterOperation( public void setUp() throws Exception { super.setUp(); - settingsFilter = new SettingsModule(Settings.EMPTY, emptyList(), emptyList(), emptySet()).getSettingsFilter(); + settingsFilter = new SettingsModule(Settings.EMPTY, emptyList(), emptyList()).getSettingsFilter(); threadPool = new TestThreadPool("GetSettingsActionTests"); clusterService = createClusterService(threadPool); CapturingTransport capturingTransport = new CapturingTransport(); diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java index 533835e40ce7..1f7b436e1ea8 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java @@ -9,10 +9,10 @@ package org.elasticsearch.action.fieldcaps; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ClusterSettings; @@ -53,7 +53,7 @@ public void testCCSCompatibilityCheck() throws Exception { try { TransportService transportService = 
MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ); diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 5e2ae52d2a94..238e6e8fe9d6 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.GroupShardsIteratorTests; import org.elasticsearch.cluster.routing.IndexRoutingTable; @@ -464,7 +465,7 @@ private MockTransportService[] startTransport( MockTransportService remoteSeedTransport = RemoteClusterConnectionTests.startTransport( "node_remote" + i, knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ); @@ -504,7 +505,7 @@ public void testCCSRemoteReduceMergeFails() throws Exception { try ( MockTransportService service = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -569,7 +570,7 @@ public void testCCSRemoteReduce() throws Exception { try ( MockTransportService service = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -824,7 +825,7 @@ public void testCollectSearchShards() throws Exception { try ( MockTransportService service = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, 
TransportVersion.current(), threadPool, null @@ -1478,7 +1479,7 @@ public void testCCSCompatibilityCheck() throws Exception { try { TransportService transportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ); diff --git a/server/src/test/java/org/elasticsearch/action/support/CancellableFanOutTests.java b/server/src/test/java/org/elasticsearch/action/support/CancellableFanOutTests.java index 55864a07761e..2d9586f58b07 100644 --- a/server/src/test/java/org/elasticsearch/action/support/CancellableFanOutTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/CancellableFanOutTests.java @@ -95,6 +95,43 @@ protected String onCompletion() { } } + public void testSendItemRequestFailure() { + final var future = new PlainActionFuture(); + new CancellableFanOut() { + int counter; + + @Override + protected void sendItemRequest(String item, ActionListener listener) { + final var exception = new ElasticsearchException("simulated"); + if (randomBoolean()) { + throw exception; + } else { + listener.onFailure(exception); + } + } + + @Override + protected void onItemResponse(String item, String itemResponse) { + fail("should not get item response"); + } + + @Override + protected void onItemFailure(String item, Exception e) { + assertEquals("simulated", e.getMessage()); + counter += 1; + } + + @Override + protected String onCompletion() { + assertEquals(3, counter); + return "completed"; + } + }.run(null, List.of("a", "b", "c").iterator(), future); + + assertTrue(future.isDone()); + assertEquals("completed", future.actionGet()); + } + public void testReleaseOnCancellation() { final var task = new CancellableTask(1, "test", "test", "", TaskId.EMPTY_TASK_ID, Map.of()); final var future = new PlainActionFuture(); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/PostWriteRefreshTests.java 
b/server/src/test/java/org/elasticsearch/action/support/replication/PostWriteRefreshTests.java index 2e379b2b3938..e45fc51cf6bd 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/PostWriteRefreshTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/PostWriteRefreshTests.java @@ -9,13 +9,13 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; import org.elasticsearch.action.admin.indices.refresh.UnpromotableShardRefreshRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; @@ -57,7 +57,12 @@ public class PostWriteRefreshTests extends IndexShardTestCase { @Override public void setUp() throws Exception { super.setUp(); - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, TransportVersion.current(), threadPool); + transportService = MockTransportService.createNewService( + Settings.EMPTY, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool + ); transportService.start(); transportService.acceptIncomingRequests(); transportService.registerRequestHandler( diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 9c6285f3c96b..ba40126610c6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -202,8 +202,8 @@ public void testToXContent() throws IOException { "voting_only" ], "version": "%s", - "minIndexVersion":"%s", - "maxIndexVersion":"%s" + "min_index_version":%s, + "max_index_version":%s } }, "transport_versions" : [ @@ -363,10 +363,10 @@ public void testToXContent() throws IOException { ephemeralId, Version.CURRENT, IndexVersion.MINIMUM_COMPATIBLE, - IndexVersion.CURRENT, + IndexVersion.current(), TransportVersion.current(), - IndexVersion.CURRENT, - IndexVersion.CURRENT, + IndexVersion.current(), + IndexVersion.current(), allocationId, allocationId ) @@ -457,8 +457,8 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti "voting_only" ], "version" : "%s", - "minIndexVersion" : "%s", - "maxIndexVersion" : "%s" + "min_index_version" : %s, + "max_index_version" : %s } }, "transport_versions" : [ @@ -614,10 +614,10 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti ephemeralId, Version.CURRENT, IndexVersion.MINIMUM_COMPATIBLE, - IndexVersion.CURRENT, + IndexVersion.current(), TransportVersion.current(), - IndexVersion.CURRENT, - IndexVersion.CURRENT, + IndexVersion.current(), + IndexVersion.current(), allocationId, allocationId ), @@ -708,8 +708,8 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti "voting_only" ], "version" : "%s", - "minIndexVersion" : "%s", - "maxIndexVersion" : "%s" + "min_index_version" : %s, + "max_index_version" : %s } }, "transport_versions" : [ @@ -871,10 +871,10 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti ephemeralId, Version.CURRENT, IndexVersion.MINIMUM_COMPATIBLE, - IndexVersion.CURRENT, + IndexVersion.current(), TransportVersion.current(), - IndexVersion.CURRENT, - IndexVersion.CURRENT, + IndexVersion.current(), + IndexVersion.current(), allocationId, allocationId ), diff --git 
a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java index 6553f7db8c35..377ac16992ba 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java @@ -32,11 +32,13 @@ import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -82,11 +84,15 @@ public void testPreventJoinClusterWithNewerIndices() { .build(); metaBuilder.put(indexMetadata, false); Metadata metadata = metaBuilder.build(); - NodeJoinExecutor.ensureIndexCompatibility(Version.CURRENT, metadata); + NodeJoinExecutor.ensureIndexCompatibility(IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current(), metadata); expectThrows( IllegalStateException.class, - () -> NodeJoinExecutor.ensureIndexCompatibility(VersionUtils.getPreviousVersion(Version.CURRENT), metadata) + () -> NodeJoinExecutor.ensureIndexCompatibility( + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersionUtils.getPreviousVersion(IndexVersion.current()), + metadata + ) ); } @@ -100,7 +106,10 @@ public void testPreventJoinClusterWithUnsupportedIndices() { .build(); metaBuilder.put(indexMetadata, false); Metadata metadata = metaBuilder.build(); - expectThrows(IllegalStateException.class, () -> NodeJoinExecutor.ensureIndexCompatibility(Version.CURRENT, metadata)); + expectThrows( + 
IllegalStateException.class, + () -> NodeJoinExecutor.ensureIndexCompatibility(IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current(), metadata) + ); } public void testPreventJoinClusterWithUnsupportedNodeVersions() { @@ -180,7 +189,7 @@ public void testSuccess() { .build(); metaBuilder.put(indexMetadata, false); Metadata metadata = metaBuilder.build(); - NodeJoinExecutor.ensureIndexCompatibility(Version.CURRENT, metadata); + NodeJoinExecutor.ensureIndexCompatibility(IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current(), metadata); } public static Settings.Builder randomCompatibleVersionSettings() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java index a1baf8c1c487..89c624711c2d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComponentTemplateTests.java @@ -120,12 +120,7 @@ private static Settings randomSettings() { } private static DataLifecycle randomLifecycle() { - return switch (randomIntBetween(0, 3)) { - case 0 -> DataLifecycleTests.IMPLICIT_INFINITE_RETENTION; - case 1 -> Template.NO_LIFECYCLE; - case 2 -> DataLifecycleTests.EXPLICIT_INFINITE_RETENTION; - default -> new DataLifecycle(randomMillisUpToYear9999()); - }; + return rarely() ? 
Template.NO_LIFECYCLE : DataLifecycleTests.randomLifecycle(); } private static Map randomMeta() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataLifecycleTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataLifecycleTests.java index 6a381f1f764b..a128ba862974 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataLifecycleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataLifecycleTests.java @@ -11,12 +11,15 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverConditions; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.action.admin.indices.rollover.RolloverConfigurationTests; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -25,6 +28,8 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.containsString; @@ -33,9 +38,6 @@ public class DataLifecycleTests extends AbstractXContentSerializingTestCase { - public static final DataLifecycle EXPLICIT_INFINITE_RETENTION = new DataLifecycle(DataLifecycle.Retention.NULL); - public static final DataLifecycle IMPLICIT_INFINITE_RETENTION = new DataLifecycle((TimeValue) null); - @Override protected Writeable.Reader instanceReader() { return 
DataLifecycle::new; @@ -43,28 +45,48 @@ protected Writeable.Reader instanceReader() { @Override protected DataLifecycle createTestInstance() { - return switch (randomInt(2)) { - case 0 -> IMPLICIT_INFINITE_RETENTION; - case 1 -> EXPLICIT_INFINITE_RETENTION; - default -> new DataLifecycle(randomMillisUpToYear9999()); - }; + return randomLifecycle(); } @Override protected DataLifecycle mutateInstance(DataLifecycle instance) throws IOException { - if (IMPLICIT_INFINITE_RETENTION.equals(instance)) { - return randomBoolean() ? EXPLICIT_INFINITE_RETENTION : new DataLifecycle(randomMillisUpToYear9999()); - } - if (EXPLICIT_INFINITE_RETENTION.equals(instance)) { - return randomBoolean() ? IMPLICIT_INFINITE_RETENTION : new DataLifecycle(randomMillisUpToYear9999()); + var retention = instance.getDataRetention(); + var downsampling = instance.getDownsampling(); + if (randomBoolean()) { + if (retention == null || retention == DataLifecycle.Retention.NULL) { + retention = randomValueOtherThan(retention, DataLifecycleTests::randomRetention); + } else { + retention = switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Retention.NULL; + default -> new DataLifecycle.Retention( + TimeValue.timeValueMillis(randomValueOtherThan(retention.value().millis(), ESTestCase::randomMillisUpToYear9999)) + ); + }; + } + } else { + if (downsampling == null || downsampling == DataLifecycle.Downsampling.NULL) { + downsampling = randomValueOtherThan(downsampling, DataLifecycleTests::randomDownsampling); + } else { + downsampling = switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Downsampling.NULL; + default -> { + if (downsampling.rounds().size() == 1) { + yield new DataLifecycle.Downsampling( + List.of(downsampling.rounds().get(0), nextRound(downsampling.rounds().get(0))) + ); + + } else { + var updatedRounds = new ArrayList<>(downsampling.rounds()); + updatedRounds.remove(randomInt(downsampling.rounds().size() - 1)); + yield new 
DataLifecycle.Downsampling(updatedRounds); + } + } + }; + } } - return switch (randomInt(2)) { - case 0 -> IMPLICIT_INFINITE_RETENTION; - case 1 -> EXPLICIT_INFINITE_RETENTION; - default -> new DataLifecycle( - randomValueOtherThan(instance.getEffectiveDataRetention().millis(), ESTestCase::randomMillisUpToYear9999) - ); - }; + return new DataLifecycle(retention, downsampling); } @Override @@ -120,4 +142,116 @@ public void testInvalidClusterSetting() { assertThat(exception.getMessage(), equalTo("The rollover conditions cannot be null or blank")); } } + + public void testInvalidDownsamplingConfiguration() { + { + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> new DataLifecycle.Downsampling( + List.of( + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(10), + new DownsampleConfig(new DateHistogramInterval("2h")) + ), + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(3), + new DownsampleConfig(new DateHistogramInterval("2h")) + ) + ) + ) + ); + assertThat( + exception.getMessage(), + equalTo("A downsampling round must have a later 'after' value than the proceeding, 3d is not after 10d.") + ); + } + { + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> new DataLifecycle.Downsampling( + List.of( + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(10), + new DownsampleConfig(new DateHistogramInterval("2h")) + ), + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(30), + new DownsampleConfig(new DateHistogramInterval("2h")) + ) + ) + ) + ); + assertThat(exception.getMessage(), equalTo("Downsampling interval [2h] must be greater than the source index interval [2h].")); + } + { + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> new DataLifecycle.Downsampling( + List.of( + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(10), + new DownsampleConfig(new 
DateHistogramInterval("2h")) + ), + new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(30), + new DownsampleConfig(new DateHistogramInterval("3h")) + ) + ) + ) + ); + assertThat(exception.getMessage(), equalTo("Downsampling interval [3h] must be a multiple of the source index interval [2h].")); + } + { + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> new DataLifecycle.Downsampling(List.of()) + ); + assertThat(exception.getMessage(), equalTo("Downsampling configuration should have at least one round configured.")); + } + } + + @Nullable + public static DataLifecycle randomLifecycle() { + return new DataLifecycle(randomRetention(), randomDownsampling()); + } + + @Nullable + private static DataLifecycle.Retention randomRetention() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Retention.NULL; + default -> new DataLifecycle.Retention(TimeValue.timeValueMillis(randomMillisUpToYear9999())); + }; + } + + @Nullable + private static DataLifecycle.Downsampling randomDownsampling() { + return switch (randomInt(2)) { + case 0 -> null; + case 1 -> DataLifecycle.Downsampling.NULL; + default -> { + var count = randomIntBetween(0, 10); + List rounds = new ArrayList<>(); + var previous = new DataLifecycle.Downsampling.Round( + TimeValue.timeValueDays(randomIntBetween(1, 365)), + new DownsampleConfig(new DateHistogramInterval(randomIntBetween(1, 24) + "h")) + ); + rounds.add(previous); + for (int i = 0; i < count; i++) { + DataLifecycle.Downsampling.Round round = nextRound(previous); + rounds.add(round); + previous = round; + } + yield new DataLifecycle.Downsampling(rounds); + } + }; + } + + private static DataLifecycle.Downsampling.Round nextRound(DataLifecycle.Downsampling.Round previous) { + var after = TimeValue.timeValueDays(previous.after().days() + randomIntBetween(1, 10)); + var fixedInterval = new DownsampleConfig( + new 
DateHistogramInterval((previous.config().getFixedInterval().estimateMillis() * randomIntBetween(2, 5)) + "ms") + ); + return new DataLifecycle.Downsampling.Round(after, fixedInterval); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java index 237fee858d77..383c2dbad59a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java @@ -11,10 +11,11 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.index.IndexVersionUtils; import java.util.Collections; @@ -93,20 +94,20 @@ public void testCustomSimilarity() { .put("index.similarity.my_similarity.after_effect", "l") .build() ); - service.verifyIndexMetadata(src, Version.CURRENT.minimumIndexCompatibilityVersion()); + service.verifyIndexMetadata(src, IndexVersion.MINIMUM_COMPATIBLE); } public void testIncompatibleVersion() { IndexMetadataVerifier service = getIndexMetadataVerifier(); - Version minCompat = Version.CURRENT.minimumIndexCompatibilityVersion(); - Version indexCreated = Version.fromString((minCompat.major - 1) + "." + randomInt(5) + "." 
+ randomInt(5)); + IndexVersion minCompat = IndexVersion.MINIMUM_COMPATIBLE; + IndexVersion indexCreated = IndexVersion.fromId(randomIntBetween(1000099, minCompat.id() - 1)); final IndexMetadata metadata = newIndexMeta( "foo", - Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated).build() + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated.id()).build() ); String message = expectThrows( IllegalStateException.class, - () -> service.verifyIndexMetadata(metadata, Version.CURRENT.minimumIndexCompatibilityVersion()) + () -> service.verifyIndexMetadata(metadata, IndexVersion.MINIMUM_COMPATIBLE) ).getMessage(); assertThat( message, @@ -120,16 +121,19 @@ public void testIncompatibleVersion() { + minCompat + "]." + " It should be re-indexed in Elasticsearch " - + minCompat.major + + (Version.CURRENT.major - 1) + ".x before upgrading to " - + Version.CURRENT.toString() + + Version.CURRENT + "." ) ); - indexCreated = VersionUtils.randomVersionBetween(random(), minCompat, Version.CURRENT); - IndexMetadata goodMeta = newIndexMeta("foo", Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated).build()); - service.verifyIndexMetadata(goodMeta, Version.CURRENT.minimumIndexCompatibilityVersion()); + indexCreated = IndexVersionUtils.randomVersionBetween(random(), minCompat, IndexVersion.current()); + IndexMetadata goodMeta = newIndexMeta( + "foo", + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, indexCreated.id()).build() + ); + service.verifyIndexMetadata(goodMeta, IndexVersion.MINIMUM_COMPATIBLE); } private IndexMetadataVerifier getIndexMetadataVerifier() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 73b415752fb5..bc07aa9badea 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -581,17 +581,17 @@ private static void validateIndexName(MetadataCreateIndexService metadataCreateI } public void testCalculateNumRoutingShards() { - assertEquals(1024, MetadataCreateIndexService.calculateNumRoutingShards(1, IndexVersion.CURRENT)); - assertEquals(1024, MetadataCreateIndexService.calculateNumRoutingShards(2, IndexVersion.CURRENT)); - assertEquals(768, MetadataCreateIndexService.calculateNumRoutingShards(3, IndexVersion.CURRENT)); - assertEquals(576, MetadataCreateIndexService.calculateNumRoutingShards(9, IndexVersion.CURRENT)); - assertEquals(1024, MetadataCreateIndexService.calculateNumRoutingShards(512, IndexVersion.CURRENT)); - assertEquals(2048, MetadataCreateIndexService.calculateNumRoutingShards(1024, IndexVersion.CURRENT)); - assertEquals(4096, MetadataCreateIndexService.calculateNumRoutingShards(2048, IndexVersion.CURRENT)); + assertEquals(1024, MetadataCreateIndexService.calculateNumRoutingShards(1, IndexVersion.current())); + assertEquals(1024, MetadataCreateIndexService.calculateNumRoutingShards(2, IndexVersion.current())); + assertEquals(768, MetadataCreateIndexService.calculateNumRoutingShards(3, IndexVersion.current())); + assertEquals(576, MetadataCreateIndexService.calculateNumRoutingShards(9, IndexVersion.current())); + assertEquals(1024, MetadataCreateIndexService.calculateNumRoutingShards(512, IndexVersion.current())); + assertEquals(2048, MetadataCreateIndexService.calculateNumRoutingShards(1024, IndexVersion.current())); + assertEquals(4096, MetadataCreateIndexService.calculateNumRoutingShards(2048, IndexVersion.current())); for (int i = 0; i < 1000; i++) { int randomNumShards = randomIntBetween(1, 10000); - int numRoutingShards = MetadataCreateIndexService.calculateNumRoutingShards(randomNumShards, IndexVersion.CURRENT); + int numRoutingShards = MetadataCreateIndexService.calculateNumRoutingShards(randomNumShards, 
IndexVersion.current()); if (numRoutingShards <= 1024) { assertTrue("numShards: " + randomNumShards, randomNumShards < 513); assertTrue("numRoutingShards: " + numRoutingShards, numRoutingShards > 512); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index d759952c73ed..84a6e03ce8cc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterState; @@ -31,6 +32,7 @@ import org.elasticsearch.indices.IndexTemplateMissingException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidIndexTemplateException; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -1500,11 +1502,20 @@ public void testResolveLifecycle() throws Exception { final MetadataIndexTemplateService service = getMetadataIndexTemplateService(); ClusterState state = ClusterState.EMPTY_STATE; + DataLifecycle emptyLifecycle = new DataLifecycle(); + DataLifecycle lifecycle30d = new DataLifecycle(TimeValue.timeValueDays(30)); String ct30d = "ct_30d"; state = addComponentTemplate(service, state, ct30d, lifecycle30d); - DataLifecycle lifecycle45d = new DataLifecycle(TimeValue.timeValueDays(45)); + DataLifecycle 
lifecycle45d = new DataLifecycle( + new DataLifecycle.Retention(TimeValue.timeValueDays(45)), + new DataLifecycle.Downsampling( + List.of( + new DataLifecycle.Downsampling.Round(TimeValue.timeValueDays(30), new DownsampleConfig(new DateHistogramInterval("3h"))) + ) + ) + ); String ct45d = "ct_45d"; state = addComponentTemplate(service, state, ct45d, lifecycle45d); @@ -1513,7 +1524,7 @@ public void testResolveLifecycle() throws Exception { state = addComponentTemplate(service, state, ctNullRetention, lifecycleNullRetention); String ctEmptyLifecycle = "ct_empty_lifecycle"; - state = addComponentTemplate(service, state, ctEmptyLifecycle, DataLifecycleTests.IMPLICIT_INFINITE_RETENTION); + state = addComponentTemplate(service, state, ctEmptyLifecycle, emptyLifecycle); String ctNullLifecycle = "ct_null_lifecycle"; state = addComponentTemplate(service, state, ctNullLifecycle, Template.NO_LIFECYCLE); @@ -1525,13 +1536,7 @@ public void testResolveLifecycle() throws Exception { // Component B: "lifecycle": {} // Composable Z: - // Result: "lifecycle": {} - assertLifecycleResolution( - service, - state, - List.of(ctNoLifecycle, ctEmptyLifecycle), - null, - DataLifecycleTests.IMPLICIT_INFINITE_RETENTION - ); + assertLifecycleResolution(service, state, List.of(ctNoLifecycle, ctEmptyLifecycle), null, emptyLifecycle); // Component A: "lifecycle": {} // Component B: "lifecycle": {"retention": "30d"} @@ -1540,16 +1545,22 @@ public void testResolveLifecycle() throws Exception { assertLifecycleResolution(service, state, List.of(ctEmptyLifecycle, ct30d), null, lifecycle30d); // Component A: "lifecycle": {"retention": "30d"} - // Component B: "lifecycle": {"retention": "45d"} + // Component B: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} // Composable Z: "lifecycle": {} - // Result: "lifecycle": {"retention": "45d"} - assertLifecycleResolution(service, state, List.of(ct30d, ct45d), DataLifecycleTests.IMPLICIT_INFINITE_RETENTION, 
lifecycle45d); + // Result: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} + assertLifecycleResolution(service, state, List.of(ct30d, ct45d), emptyLifecycle, lifecycle45d); // Component A: "lifecycle": {} - // Component B: "lifecycle": {"retention": "45d"} + // Component B: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} // Composable Z: "lifecycle": {"retention": "30d"} - // Result: "lifecycle": {"retention": "30d"} - assertLifecycleResolution(service, state, List.of(ctEmptyLifecycle, ct45d), lifecycle30d, lifecycle30d); + // Result: "lifecycle": {"retention": "30d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} + assertLifecycleResolution( + service, + state, + List.of(ctEmptyLifecycle, ct45d), + lifecycle30d, + new DataLifecycle(lifecycle30d.getDataRetention(), lifecycle45d.getDownsampling()) + ); // Component A: "lifecycle": {"retention": "30d"} // Component B: "lifecycle": {"retention": null} @@ -1559,14 +1570,21 @@ public void testResolveLifecycle() throws Exception { assertLifecycleResolution(service, state, List.of(ct30d, ctNullRetention), null, lifecycleNullRetention); // Component A: "lifecycle": {} - // Component B: "lifecycle": {"retention": "45d"} + // Component B: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} // Composable Z: "lifecycle": {"retention": null} - // Result: "lifecycle": {"retention": null} , here the result of the composition is with retention explicitly + // Result: "lifecycle": {"retention": null, "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} , here the result of the + // composition is with retention explicitly // nullified, but effectively this is equivalent to "lifecycle": {} when there is no further composition. 
- assertLifecycleResolution(service, state, List.of(ctEmptyLifecycle, ct45d), lifecycleNullRetention, lifecycleNullRetention); + assertLifecycleResolution( + service, + state, + List.of(ctEmptyLifecycle, ct45d), + lifecycleNullRetention, + new DataLifecycle(DataLifecycle.Retention.NULL, lifecycle45d.getDownsampling()) + ); // Component A: "lifecycle": {"retention": "30d"} - // Component B: "lifecycle": {"retention": "45d"} + // Component B: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} // Composable Z: "lifecycle": null // Result: null aka unmanaged assertLifecycleResolution(service, state, List.of(ct30d, ct45d), Template.NO_LIFECYCLE, null); @@ -1579,8 +1597,8 @@ public void testResolveLifecycle() throws Exception { // Component A: "lifecycle": {"retention": "30d"} // Component B: "lifecycle": null - // Composable Z: "lifecycle": {"retention": "45d"} - // Result: "lifecycle": {"retention": "45d"} + // Composable Z: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} + // Result: "lifecycle": {"retention": "45d", "downsampling": [{"after": "30d", "fixed_interval": "3h"}]} assertLifecycleResolution(service, state, List.of(ct30d, ctNullLifecycle), lifecycle45d, lifecycle45d); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index a6a62d76313d..f3b08352a3f1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -767,18 +767,18 @@ public void testFindMappingsWithFilters() throws IOException { public void testOldestIndexComputation() { Metadata metadata = buildIndicesWithVersions( IndexVersion.V_7_0_0, - IndexVersion.CURRENT, - IndexVersion.fromId(IndexVersion.CURRENT.id() + 1) + IndexVersion.current(), + 
IndexVersion.fromId(IndexVersion.current().id() + 1) ).build(); assertEquals(IndexVersion.V_7_0_0, metadata.oldestIndexVersion()); Metadata.Builder b = Metadata.builder(); - assertEquals(IndexVersion.CURRENT, b.build().oldestIndexVersion()); + assertEquals(IndexVersion.current(), b.build().oldestIndexVersion()); Throwable ex = expectThrows( IllegalArgumentException.class, - () -> buildIndicesWithVersions(IndexVersion.V_7_0_0, IndexVersion.ZERO, IndexVersion.fromId(IndexVersion.CURRENT.id() + 1)) + () -> buildIndicesWithVersions(IndexVersion.V_7_0_0, IndexVersion.ZERO, IndexVersion.fromId(IndexVersion.current().id() + 1)) .build() ); @@ -803,7 +803,7 @@ private Metadata.Builder buildIndicesWithVersions(IndexVersion... indexVersions) private static IndexMetadata.Builder buildIndexMetadata(String name, String alias, Boolean writeIndex) { return IndexMetadata.builder(name) - .settings(settings(IndexVersion.CURRENT)) + .settings(settings(IndexVersion.current())) .creationDate(randomNonNegativeLong()) .putAlias(AliasMetadata.builder(alias).writeIndex(writeIndex)) .numberOfShards(1) diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java index 3325262ab3ee..7ae05025ce06 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java @@ -214,15 +214,15 @@ public void testDiscoveryNodeToXContent() { "voting_only" ], "version" : "%s", - "minIndexVersion" : "%s", - "maxIndexVersion" : "%s" + "min_index_version" : %s, + "max_index_version" : %s } }""", transportAddress, withExternalId ? 
"test-external-id" : "test-name", Version.CURRENT, IndexVersion.MINIMUM_COMPATIBLE, - IndexVersion.CURRENT + IndexVersion.current() ) ) ); diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index e743d8c8d8d4..a8a2d05ac4b8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -401,48 +402,43 @@ Set matchingNodeIds(DiscoveryNodes nodes) { abstract Set matchingNodeIds(DiscoveryNodes nodes); } - public void testMaxMinNodeVersion() { + public void testMinMaxNodeVersions() { assertEquals(Version.CURRENT, DiscoveryNodes.EMPTY_NODES.getMaxNodeVersion()); assertEquals(Version.CURRENT.minimumCompatibilityVersion(), DiscoveryNodes.EMPTY_NODES.getMinNodeVersion()); - - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - discoBuilder.add( - new DiscoveryNode( - "name_" + 1, - "node_" + 1, - buildNewFakeTransportAddress(), - Collections.emptyMap(), - new HashSet<>(randomSubsetOf(DiscoveryNodeRole.roles())), - Version.fromString("5.1.0") - ) + assertEquals(IndexVersion.current(), DiscoveryNodes.EMPTY_NODES.getMaxDataNodeCompatibleIndexVersion()); + assertEquals(IndexVersion.MINIMUM_COMPATIBLE, DiscoveryNodes.EMPTY_NODES.getMinSupportedIndexVersion()); + + // use a mix of versions with major, minor, and patch numbers + List dataVersions = List.of( + new VersionInformation(Version.fromString("3.2.5"), IndexVersion.fromId(2000099), IndexVersion.fromId(3020599)), + new VersionInformation(Version.fromString("3.0.7"), IndexVersion.fromId(2000099), 
IndexVersion.fromId(3000799)), + new VersionInformation(Version.fromString("2.1.0"), IndexVersion.fromId(1050099), IndexVersion.fromId(2010099)) ); - discoBuilder.add( - new DiscoveryNode( - "name_" + 2, - "node_" + 2, - buildNewFakeTransportAddress(), - Collections.emptyMap(), - new HashSet<>(randomSubsetOf(DiscoveryNodeRole.roles())), - Version.fromString("6.3.0") - ) + List observerVersions = List.of( + new VersionInformation(Version.fromString("5.0.17"), IndexVersion.fromId(0), IndexVersion.fromId(5001799)), + new VersionInformation(Version.fromString("2.0.1"), IndexVersion.fromId(1000099), IndexVersion.fromId(2000199)), + new VersionInformation(Version.fromString("1.6.0"), IndexVersion.fromId(0), IndexVersion.fromId(1060099)) ); - discoBuilder.localNodeId("node_" + between(1, 3)); - if (randomBoolean()) { - discoBuilder.masterNodeId("node_" + between(1, 3)); + + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + for (int i = 0; i < dataVersions.size(); i++) { + discoBuilder.add( + DiscoveryNodeUtils.builder("data_" + i) + .version(dataVersions.get(i)) + .roles(Set.of(randomBoolean() ? 
DiscoveryNodeRole.DATA_ROLE : DiscoveryNodeRole.MASTER_ROLE)) + .build() + ); + } + for (int i = 0; i < observerVersions.size(); i++) { + discoBuilder.add(DiscoveryNodeUtils.builder("observer_" + i).version(observerVersions.get(i)).roles(Set.of()).build()); } - discoBuilder.add( - new DiscoveryNode( - "name_" + 3, - "node_" + 3, - buildNewFakeTransportAddress(), - Collections.emptyMap(), - new HashSet<>(randomSubsetOf(DiscoveryNodeRole.roles())), - Version.fromString("1.1.0") - ) - ); DiscoveryNodes build = discoBuilder.build(); - assertEquals(Version.fromString("6.3.0"), build.getMaxNodeVersion()); - assertEquals(Version.fromString("1.1.0"), build.getMinNodeVersion()); + + assertEquals(Version.fromString("5.0.17"), build.getMaxNodeVersion()); + assertEquals(Version.fromString("1.6.0"), build.getMinNodeVersion()); + assertEquals(Version.fromString("2.1.0"), build.getSmallestNonClientNodeVersion()); // doesn't include 1.6.0 observer + assertEquals(IndexVersion.fromId(2010099), build.getMaxDataNodeCompatibleIndexVersion()); // doesn't include 2000199 observer + assertEquals(IndexVersion.fromId(2000099), build.getMinSupportedIndexVersion()); // also includes observers } private static String noAttr(DiscoveryNode discoveryNode) { @@ -477,7 +473,7 @@ public void accept(Consumer update, long expectedGenerat } }; - final BiFunction nodeVersionFactory = (i, v) -> new DiscoveryNode( + final BiFunction nodeVersionFactory = (i, v) -> new DiscoveryNode( "name" + i, "id" + i, buildNewFakeTransportAddress(), @@ -486,9 +482,12 @@ public void accept(Consumer update, long expectedGenerat v ); - final IntFunction nodeFactory = i -> nodeVersionFactory.apply(i, Version.CURRENT); + final IntFunction nodeFactory = i -> nodeVersionFactory.apply(i, VersionInformation.CURRENT); - final var node0 = nodeVersionFactory.apply(0, VersionUtils.randomVersion(random())); + final var node0 = nodeVersionFactory.apply( + 0, + new VersionInformation(VersionUtils.randomVersion(random()), 
IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()) + ); testHarness.accept(builder -> builder.add(node0), 0L); final var node1 = nodeFactory.apply(1); @@ -511,7 +510,7 @@ public void accept(Consumer update, long expectedGenerat testHarness.accept(builder -> builder.remove(node2), 2L); // if old nodes are present then the generation is forced to zero - final var node3 = nodeVersionFactory.apply(3, Version.V_8_8_0); + final var node3 = nodeVersionFactory.apply(3, VersionInformation.inferVersions(Version.V_8_8_0)); testHarness.accept(builder -> builder.add(node3), 0L); // and it remains at zero while the old node is present diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java index f462fa46f228..fe7c36ff458d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/BatchedRerouteServiceTests.java @@ -7,10 +7,13 @@ */ package org.elasticsearch.cluster.routing; +import org.apache.logging.log4j.Level; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; @@ -18,6 +21,8 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.TestThreadPool; import 
org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -28,6 +33,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -229,6 +235,102 @@ public void testNotifiesOnFailure() throws InterruptedException { } } - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); // i.e. it doesn't leak any listeners + safeAwait(countDownLatch); // i.e. it doesn't leak any listeners + } + + @TestLogging(reason = "testing log output", value = "org.elasticsearch.cluster.routing.BatchedRerouteService:DEBUG") + public void testExceptionFidelity() { + + final var mockLogAppender = new MockLogAppender(); + try (var ignored = mockLogAppender.capturing(BatchedRerouteService.class)) { + + clusterService.getMasterService() + .setClusterStatePublisher( + (event, publishListener, ackListener) -> publishListener.onFailure(new FailedToCommitClusterStateException("simulated")) + ); + + // Case 1: an exception thrown from within the reroute itself + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "failure within reroute", + BatchedRerouteService.class.getCanonicalName(), + Level.ERROR, + "unexpected failure" + ) + ); + + final BatchedRerouteService failingRerouteService = new BatchedRerouteService(clusterService, (s, r, l) -> { + throw new ElasticsearchException("simulated"); + }); + final var rerouteFailureFuture = new PlainActionFuture(); + failingRerouteService.reroute("publish failure", randomFrom(EnumSet.allOf(Priority.class)), rerouteFailureFuture); + assertThat( + expectThrows(ExecutionException.class, ElasticsearchException.class, () -> rerouteFailureFuture.get(10, TimeUnit.SECONDS)) + .getMessage(), + equalTo("simulated") + ); + mockLogAppender.assertAllExpectationsMatched(); + + // None of the 
other cases should yield any log messages by default + + mockLogAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation("no errors", BatchedRerouteService.class.getCanonicalName(), Level.ERROR, "*") + ); + mockLogAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation("no warnings", BatchedRerouteService.class.getCanonicalName(), Level.WARN, "*") + ); + mockLogAppender.addExpectation( + new MockLogAppender.UnseenEventExpectation("no info", BatchedRerouteService.class.getCanonicalName(), Level.INFO, "*") + ); + + // Case 2: a FailedToCommitClusterStateException (see the call to setClusterStatePublisher above) + + final BatchedRerouteService batchedRerouteService = new BatchedRerouteService(clusterService, (s, r, l) -> { + l.onResponse(null); + return ClusterState.builder(s).build(); + }); + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "publish failure", + BatchedRerouteService.class.getCanonicalName(), + Level.DEBUG, + "unexpected failure" + ) + ); + + final var publishFailureFuture = new PlainActionFuture(); + batchedRerouteService.reroute("publish failure", randomFrom(EnumSet.allOf(Priority.class)), publishFailureFuture); + expectThrows( + ExecutionException.class, + FailedToCommitClusterStateException.class, + () -> publishFailureFuture.get(10, TimeUnit.SECONDS) + ); + mockLogAppender.assertAllExpectationsMatched(); + + // Case 3: a NotMasterException + + PlainActionFuture.get(future -> { + clusterService.getClusterApplierService().onNewClusterState("simulated", () -> { + final var state = clusterService.state(); + return ClusterState.builder(state).nodes(state.nodes().withMasterNodeId(null)).build(); + }, future); + }, 10, TimeUnit.SECONDS); + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "not-master failure", + BatchedRerouteService.class.getCanonicalName(), + Level.DEBUG, + "unexpected failure" + ) + ); + final var notMasterFuture = new PlainActionFuture(); + 
batchedRerouteService.reroute("not-master failure", randomFrom(EnumSet.allOf(Priority.class)), notMasterFuture); + expectThrows(ExecutionException.class, NotMasterException.class, () -> notMasterFuture.get(10, TimeUnit.SECONDS)); + + mockLogAppender.assertAllExpectationsMatched(); + } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java index 2aa7c911e705..56d3a3910cf5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java @@ -8,11 +8,11 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.Version; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardIdTests; import org.elasticsearch.repositories.IndexId; @@ -345,7 +345,7 @@ public void testEqualsIgnoringVersion() { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("test", new SnapshotId("s1", UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ), otherRouting.unassignedInfo(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 7bfd65c2f16c..df0175b1200a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; @@ -352,7 +353,7 @@ public void testNewIndexRestored() { new SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ), new HashSet<>() @@ -436,7 +437,7 @@ public void testExistingIndexRestored() { new SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ) ) diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 572c7ab5f0ed..0723263291f1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.EmptySnapshotsInfoService; @@ -449,7 +450,7 @@ public void testRestoreDoesNotAllocateSnapshotOnOlderNodes() { RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) .addAsRestore( metadata.index("test"), - new SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, Version.CURRENT, indexId) + new SnapshotRecoverySource(UUIDs.randomBase64UUID(), snapshot, 
IndexVersion.current(), indexId) ) .build() ) @@ -608,13 +609,13 @@ public void testMessages() { final SnapshotRecoverySource newVersionSnapshot = new SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), - newNode.node().getVersion(), + newNode.node().getVersion().indexVersion, indexId ); final SnapshotRecoverySource oldVersionSnapshot = new SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), - oldNode.node().getVersion(), + oldNode.node().getVersion().indexVersion, indexId ); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesTests.java index 56a276b907ae..18adf3ca32c7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesTests.java @@ -44,9 +44,8 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.oneOf; public class RoutingNodesTests extends ESAllocationTestCase { @@ -418,47 +417,14 @@ public void testNodeInterleavedShardIterator() { } public void testMoveShardWithDefaultRole() { - - var inSync = randomList(2, 2, UUIDs::randomBase64UUID); - var indexMetadata = IndexMetadata.builder("index") - .settings(indexSettings(Version.CURRENT, 1, 1)) - .putInSyncAllocationIds(0, Set.copyOf(inSync)) - .build(); - - var shardId = new ShardId(indexMetadata.getIndex(), 0); - - var 
indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()) - .addShard(TestShardRouting.newShardRouting(shardId, "node-1", null, true, STARTED, ShardRouting.Role.DEFAULT)) - .addShard(TestShardRouting.newShardRouting(shardId, "node-2", null, false, STARTED, ShardRouting.Role.DEFAULT)) - .build(); - - var node1 = newNode("node-1"); - var node2 = newNode("node-2"); - var node3 = newNode("node-3"); - - var clusterState = ClusterState.builder(ClusterName.DEFAULT) - .metadata(Metadata.builder().put(indexMetadata, false).build()) - .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).build()) - .routingTable(RoutingTable.builder().add(indexRoutingTable).build()) - .build(); - - var routingNodes = clusterState.getRoutingNodes().mutableCopy(); - - routingNodes.relocateOrReinitializeShard( - routingNodes.node("node-1").getByShardId(shardId), - "node-3", - 0L, - new RoutingChangesObserver() { - } - ); - - assertThat(routingNodes.node("node-1").getByShardId(shardId).state(), equalTo(RELOCATING)); - assertThat(routingNodes.node("node-2").getByShardId(shardId).state(), equalTo(STARTED)); - assertThat(routingNodes.node("node-3").getByShardId(shardId).state(), equalTo(INITIALIZING)); + runMoveShardRolesTest(ShardRouting.Role.DEFAULT, ShardRouting.Role.DEFAULT); } public void testMoveShardWithPromotableOnlyRole() { + runMoveShardRolesTest(ShardRouting.Role.INDEX_ONLY, ShardRouting.Role.SEARCH_ONLY); + } + private void runMoveShardRolesTest(ShardRouting.Role primaryRole, ShardRouting.Role replicaRole) { var inSync = randomList(2, 2, UUIDs::randomBase64UUID); var indexMetadata = IndexMetadata.builder("index") .settings(indexSettings(Version.CURRENT, 1, 1)) @@ -468,8 +434,8 @@ public void testMoveShardWithPromotableOnlyRole() { var shardId = new ShardId(indexMetadata.getIndex(), 0); var indexRoutingTable = IndexRoutingTable.builder(indexMetadata.getIndex()) - .addShard(TestShardRouting.newShardRouting(shardId, "node-1", null, true, STARTED, 
ShardRouting.Role.INDEX_ONLY)) - .addShard(TestShardRouting.newShardRouting(shardId, "node-2", null, false, STARTED, ShardRouting.Role.SEARCH_ONLY)) + .addShard(TestShardRouting.newShardRouting(shardId, "node-1", null, true, STARTED, primaryRole)) + .addShard(TestShardRouting.newShardRouting(shardId, "node-2", null, false, STARTED, replicaRole)) .build(); var node1 = newNode("node-1"); @@ -484,18 +450,13 @@ public void testMoveShardWithPromotableOnlyRole() { var routingNodes = clusterState.getRoutingNodes().mutableCopy(); - routingNodes.relocateOrReinitializeShard( - routingNodes.node("node-1").getByShardId(shardId), - "node-3", - 0L, - new RoutingChangesObserver() { - } - ); + routingNodes.relocateShard(routingNodes.node("node-1").getByShardId(shardId), "node-3", 0L, new RoutingChangesObserver() { + }); - assertThat(routingNodes.node("node-1").getByShardId(shardId), nullValue()); - assertThat(routingNodes.node("node-2").getByShardId(shardId), nullValue()); + assertThat(routingNodes.node("node-1").getByShardId(shardId).state(), equalTo(RELOCATING)); + assertThat(routingNodes.node("node-2").getByShardId(shardId).state(), equalTo(STARTED)); assertThat(routingNodes.node("node-3").getByShardId(shardId).state(), equalTo(INITIALIZING)); - assertThat(routingNodes.unassigned().ignored(), hasSize(1)); + assertThat(routingNodes.unassigned().ignored(), empty()); } private boolean assertShardStats(RoutingNodes routingNodes) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index e6d33b106c27..95f2cce79567 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.UUIDs; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.InternalSnapshotsInfoService; @@ -386,7 +387,7 @@ private ClusterState createRecoveryStateAndInitializeAllocations( new SnapshotRecoverySource( restoreUUID, snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId(indexMetadata.getIndex().getName(), UUIDs.randomBase64UUID(random())) ), new HashSet<>() @@ -399,7 +400,7 @@ private ClusterState createRecoveryStateAndInitializeAllocations( new SnapshotRecoverySource( restoreUUID, snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId(indexMetadata.getIndex().getName(), UUIDs.randomBase64UUID(random())) ) ); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index 836365629547..b6ef484de0e7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.InternalSnapshotsInfoService; @@ -556,7 +557,7 @@ public void testUnassignedAllocationPredictsDiskUsage() { final var recoverySource = new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(random()), new Snapshot("repo", new SnapshotId("snap", UUIDs.randomBase64UUID(random()))), - 
Version.CURRENT, + IndexVersion.current(), new IndexId("index", UUIDs.randomBase64UUID(random())) ); routingTable.addAsRestore(restoredIndexMetadata, recoverySource); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 8d9f17ba8e85..e9020f29f043 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -49,6 +49,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.EmptySnapshotsInfoService; @@ -1136,7 +1137,7 @@ private void doTestDiskThresholdWithSnapshotShardSizes(boolean testMaxHeadroom) RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY) .addAsNewRestore( indexMetadata, - new RecoverySource.SnapshotRecoverySource("_restore_uuid", snapshot, Version.CURRENT, indexId), + new RecoverySource.SnapshotRecoverySource("_restore_uuid", snapshot, IndexVersion.current(), indexId), new HashSet<>() ) .build() diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java index c1d5e2a64b07..013717dbd5cb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java @@ 
-28,6 +28,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; @@ -230,7 +231,7 @@ private RecoverySource.SnapshotRecoverySource createSnapshotRecoverySource(final return new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ); } diff --git a/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java b/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java index fb325a731085..d4b2d34f0d33 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java @@ -22,12 +22,12 @@ public class QueriesTests extends ESTestCase { public void testNonNestedQuery() { // This is a custom query that extends AutomatonQuery and want to make sure the equals method works - assertEquals(Queries.newNonNestedFilter(IndexVersion.CURRENT), Queries.newNonNestedFilter(IndexVersion.CURRENT)); + assertEquals(Queries.newNonNestedFilter(IndexVersion.current()), Queries.newNonNestedFilter(IndexVersion.current())); assertEquals( - Queries.newNonNestedFilter(IndexVersion.CURRENT).hashCode(), - Queries.newNonNestedFilter(IndexVersion.CURRENT).hashCode() + Queries.newNonNestedFilter(IndexVersion.current()).hashCode(), + Queries.newNonNestedFilter(IndexVersion.current()).hashCode() ); - assertEquals(Queries.newNonNestedFilter(IndexVersion.CURRENT), new FieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME)); + assertEquals(Queries.newNonNestedFilter(IndexVersion.current()), new FieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME)); } 
public void testIsNegativeQuery() { diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java index af1260c8ee36..cc3764594e00 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.transport.TransportSettings; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -42,7 +41,6 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.hasToString; -import static org.hamcrest.Matchers.sameInstance; public class ScopedSettingsTests extends ESTestCase { @@ -1300,187 +1298,4 @@ public void testPrivateIndexSettingsSkipValidation() { indexScopedSettings.validate(settings, false, /* validateInternalOrPrivateIndex */ false); } - public void testUpgradeSetting() { - final Setting oldSetting = Setting.simpleString("foo.old", Property.NodeScope); - final Setting newSetting = Setting.simpleString("foo.new", Property.NodeScope); - final Setting remainingSetting = Setting.simpleString("foo.remaining", Property.NodeScope); - - final AbstractScopedSettings service = new ClusterSettings( - Settings.EMPTY, - new HashSet<>(Arrays.asList(oldSetting, newSetting, remainingSetting)), - Collections.singleton(new SettingUpgrader() { - - @Override - public Setting getSetting() { - return oldSetting; - } - - @Override - public String getKey(final String key) { - return "foo.new"; - } - - @Override - public String getValue(final String value) { - return "new." 
+ value; - } - - }) - ); - - final Settings settings = Settings.builder() - .put("foo.old", randomAlphaOfLength(8)) - .put("foo.remaining", randomAlphaOfLength(8)) - .build(); - final Settings upgradedSettings = service.upgradeSettings(settings); - assertFalse(oldSetting.exists(upgradedSettings)); - assertTrue(newSetting.exists(upgradedSettings)); - assertThat(newSetting.get(upgradedSettings), equalTo("new." + oldSetting.get(settings))); - assertTrue(remainingSetting.exists(upgradedSettings)); - assertThat(remainingSetting.get(upgradedSettings), equalTo(remainingSetting.get(settings))); - } - - public void testUpgradeSettingsNoChangesPreservesInstance() { - final Setting oldSetting = Setting.simpleString("foo.old", Property.NodeScope); - final Setting newSetting = Setting.simpleString("foo.new", Property.NodeScope); - final Setting remainingSetting = Setting.simpleString("foo.remaining", Property.NodeScope); - - final AbstractScopedSettings service = new ClusterSettings( - Settings.EMPTY, - new HashSet<>(Arrays.asList(oldSetting, newSetting, remainingSetting)), - Collections.singleton(new SettingUpgrader() { - - @Override - public Setting getSetting() { - return oldSetting; - } - - @Override - public String getKey(final String key) { - return "foo.new"; - } - - }) - ); - - final Settings settings = Settings.builder().put("foo.remaining", randomAlphaOfLength(8)).build(); - final Settings upgradedSettings = service.upgradeSettings(settings); - assertThat(upgradedSettings, sameInstance(settings)); - } - - public void testUpgradeComplexSetting() { - final Setting.AffixSetting oldSetting = Setting.affixKeySetting( - "foo.old.", - "suffix", - key -> Setting.simpleString(key, Property.NodeScope) - ); - final Setting.AffixSetting newSetting = Setting.affixKeySetting( - "foo.new.", - "suffix", - key -> Setting.simpleString(key, Property.NodeScope) - ); - final Setting.AffixSetting remainingSetting = Setting.affixKeySetting( - "foo.remaining.", - "suffix", - key -> 
Setting.simpleString(key, Property.NodeScope) - ); - - final AbstractScopedSettings service = new ClusterSettings( - Settings.EMPTY, - new HashSet<>(Arrays.asList(oldSetting, newSetting, remainingSetting)), - Collections.singleton(new SettingUpgrader() { - - @Override - public Setting getSetting() { - return oldSetting; - } - - @Override - public String getKey(final String key) { - return key.replaceFirst("^foo\\.old", "foo\\.new"); - } - - @Override - public String getValue(final String value) { - return "new." + value; - } - - }) - ); - - final int count = randomIntBetween(1, 8); - final List concretes = new ArrayList<>(count); - final Settings.Builder builder = Settings.builder(); - for (int i = 0; i < count; i++) { - final String concrete = randomAlphaOfLength(8); - concretes.add(concrete); - builder.put("foo.old." + concrete + ".suffix", randomAlphaOfLength(8)); - builder.put("foo.remaining." + concrete + ".suffix", randomAlphaOfLength(8)); - } - final Settings settings = builder.build(); - final Settings upgradedSettings = service.upgradeSettings(settings); - for (final String concrete : concretes) { - assertFalse(oldSetting.getConcreteSettingForNamespace(concrete).exists(upgradedSettings)); - assertTrue(newSetting.getConcreteSettingForNamespace(concrete).exists(upgradedSettings)); - assertThat( - newSetting.getConcreteSettingForNamespace(concrete).get(upgradedSettings), - equalTo("new." 
+ oldSetting.getConcreteSettingForNamespace(concrete).get(settings)) - ); - assertTrue(remainingSetting.getConcreteSettingForNamespace(concrete).exists(upgradedSettings)); - assertThat( - remainingSetting.getConcreteSettingForNamespace(concrete).get(upgradedSettings), - equalTo(remainingSetting.getConcreteSettingForNamespace(concrete).get(settings)) - ); - } - } - - public void testUpgradeListSetting() { - final Setting> oldSetting = Setting.listSetting( - "foo.old", - Collections.emptyList(), - Function.identity(), - Property.NodeScope - ); - final Setting> newSetting = Setting.listSetting( - "foo.new", - Collections.emptyList(), - Function.identity(), - Property.NodeScope - ); - - final AbstractScopedSettings service = new ClusterSettings( - Settings.EMPTY, - new HashSet<>(Arrays.asList(oldSetting, newSetting)), - Collections.singleton(new SettingUpgrader>() { - - @Override - public Setting> getSetting() { - return oldSetting; - } - - @Override - public String getKey(final String key) { - return "foo.new"; - } - - @Override - public List getListValue(final List value) { - return value.stream().map(s -> "new." + s).toList(); - } - }) - ); - - final int length = randomIntBetween(0, 16); - final List values = length == 0 ? Collections.emptyList() : new ArrayList<>(length); - for (int i = 0; i < length; i++) { - values.add(randomAlphaOfLength(8)); - } - - final Settings settings = Settings.builder().putList("foo.old", values).build(); - final Settings upgradedSettings = service.upgradeSettings(settings); - assertFalse(oldSetting.exists(upgradedSettings)); - assertTrue(newSetting.exists(upgradedSettings)); - assertThat(newSetting.get(upgradedSettings), equalTo(oldSetting.get(settings).stream().map(s -> "new." 
+ s).toList())); - } - } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java index 429213b42b3b..a8395145d2e4 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsModuleTests.java @@ -17,7 +17,6 @@ import java.util.Set; import java.util.function.Supplier; -import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.hasToString; @@ -155,8 +154,7 @@ public void testRegisterSettingsFilter() { Setting.boolSetting("bar.foo", true, Property.NodeScope, Property.Filtered), Setting.boolSetting("bar.baz", true, Property.NodeScope) ), - Arrays.asList("foo.*", "bar.foo"), - emptySet() + Arrays.asList("foo.*", "bar.foo") ); fail(); } catch (IllegalArgumentException ex) { @@ -169,8 +167,7 @@ public void testRegisterSettingsFilter() { Setting.boolSetting("bar.foo", true, Property.NodeScope, Property.Filtered), Setting.boolSetting("bar.baz", true, Property.NodeScope) ), - Arrays.asList("foo.*"), - emptySet() + Arrays.asList("foo.*") ); assertInstanceBinding(module, Settings.class, (s) -> s == settings); assertInstanceBinding(module, SettingsFilter.class, (s) -> s.filter(settings).size() == 1); @@ -217,28 +214,19 @@ public void testMutuallyExclusiveScopes() { public void testPluginSettingWithoutNamespace() { final String key = randomAlphaOfLength(8); final Setting setting = Setting.simpleString(key, Property.NodeScope); - runSettingWithoutNamespaceTest( - key, - () -> new SettingsModule(Settings.EMPTY, List.of(setting), List.of(), Set.of(), Set.of(), Set.of()) - ); + runSettingWithoutNamespaceTest(key, () -> new SettingsModule(Settings.EMPTY, List.of(setting), List.of(), Set.of(), Set.of())); } public void 
testClusterSettingWithoutNamespace() { final String key = randomAlphaOfLength(8); final Setting setting = Setting.simpleString(key, Property.NodeScope); - runSettingWithoutNamespaceTest( - key, - () -> new SettingsModule(Settings.EMPTY, List.of(), List.of(), Set.of(), Set.of(setting), Set.of()) - ); + runSettingWithoutNamespaceTest(key, () -> new SettingsModule(Settings.EMPTY, List.of(), List.of(), Set.of(setting), Set.of())); } public void testIndexSettingWithoutNamespace() { final String key = randomAlphaOfLength(8); final Setting setting = Setting.simpleString(key, Property.IndexScope); - runSettingWithoutNamespaceTest( - key, - () -> new SettingsModule(Settings.EMPTY, List.of(), List.of(), Set.of(), Set.of(), Set.of(setting)) - ); + runSettingWithoutNamespaceTest(key, () -> new SettingsModule(Settings.EMPTY, List.of(), List.of(), Set.of(), Set.of(setting))); } private void runSettingWithoutNamespaceTest(final String key, final Supplier supplier) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsUpdaterTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java rename to server/src/test/java/org/elasticsearch/common/settings/SettingsUpdaterTests.java index c3c7519293e6..e216a90b67ed 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsUpdaterTests.java @@ -5,16 +5,13 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.elasticsearch.action.admin.cluster.settings; +package org.elasticsearch.common.settings; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java deleted file mode 100644 index e1f28a1c8072..000000000000 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.common.util.concurrent; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.SuppressLoggerChecks; -import org.elasticsearch.common.component.Lifecycle; -import org.elasticsearch.test.ESTestCase; -import org.mockito.InOrder; - -import java.util.concurrent.Callable; - -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.inOrder; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.when; - -/** - * Tests {@link AbstractLifecycleRunnable}. 
- */ -public class AbstractLifecycleRunnableTests extends ESTestCase { - private final Lifecycle lifecycle = mock(Lifecycle.class); - private final Logger logger = mock(Logger.class); - - public void testDoRunOnlyRunsWhenNotStoppedOrClosed() throws Exception { - Callable runCallable = mock(Callable.class); - - // it's "not stopped or closed" - when(lifecycle.stoppedOrClosed()).thenReturn(false); - - AbstractLifecycleRunnable runnable = new AbstractLifecycleRunnable(lifecycle, logger) { - @Override - public void onFailure(Exception e) { - fail("It should not fail"); - } - - @Override - protected void doRunInLifecycle() throws Exception { - runCallable.call(); - } - }; - - runnable.run(); - - InOrder inOrder = inOrder(lifecycle, logger, runCallable); - - inOrder.verify(lifecycle).stoppedOrClosed(); - inOrder.verify(runCallable).call(); - inOrder.verify(lifecycle).stoppedOrClosed(); // onAfter uses it too, but we're not testing it here - inOrder.verifyNoMoreInteractions(); - } - - @SuppressLoggerChecks(reason = "mock usage") - public void testDoRunDoesNotRunWhenStoppedOrClosed() throws Exception { - Callable runCallable = mock(Callable.class); - - // it's stopped or closed - when(lifecycle.stoppedOrClosed()).thenReturn(true); - - AbstractLifecycleRunnable runnable = new AbstractLifecycleRunnable(lifecycle, logger) { - @Override - public void onFailure(Exception e) { - fail("It should not fail"); - } - - @Override - protected void doRunInLifecycle() throws Exception { - fail("Should not run with lifecycle stopped or closed."); - } - }; - - runnable.run(); - - InOrder inOrder = inOrder(lifecycle, logger, runCallable); - - inOrder.verify(lifecycle).stoppedOrClosed(); - inOrder.verify(logger).trace(anyString()); - inOrder.verify(lifecycle).stoppedOrClosed(); // onAfter uses it too, but we're not testing it here - inOrder.verifyNoMoreInteractions(); - } - - public void testOnAfterOnlyWhenNotStoppedOrClosed() throws Exception { - Callable runCallable = mock(Callable.class); 
- Callable afterCallable = mock(Callable.class); - - // it's "not stopped or closed" - when(lifecycle.stoppedOrClosed()).thenReturn(false); - - AbstractLifecycleRunnable runnable = new AbstractLifecycleRunnable(lifecycle, logger) { - @Override - public void onFailure(Exception e) { - fail("It should not fail"); - } - - @Override - protected void doRunInLifecycle() throws Exception { - runCallable.call(); - } - - @Override - protected void onAfterInLifecycle() { - try { - afterCallable.call(); - } catch (Exception e) { - fail("Unexpected for mock."); - } - } - }; - - runnable.run(); - - InOrder inOrder = inOrder(lifecycle, logger, runCallable, afterCallable); - - inOrder.verify(lifecycle).stoppedOrClosed(); - inOrder.verify(runCallable).call(); - inOrder.verify(lifecycle).stoppedOrClosed(); - inOrder.verify(afterCallable).call(); - inOrder.verifyNoMoreInteractions(); - } - - public void testOnAfterDoesNotHappenWhenStoppedOrClosed() throws Exception { - Callable runCallable = mock(Callable.class); - - // it's stopped or closed - when(lifecycle.stoppedOrClosed()).thenReturn(true); - - AbstractLifecycleRunnable runnable = new AbstractLifecycleRunnable(lifecycle, logger) { - @Override - public void onFailure(Exception e) { - fail("It should not fail"); - } - - @Override - protected void doRunInLifecycle() throws Exception { - fail("Should not run with lifecycle stopped or closed."); - } - - @Override - protected void onAfterInLifecycle() { - fail("Should not run with lifecycle stopped or closed."); - } - }; - - runnable.run(); - - InOrder inOrder = inOrder(lifecycle, runCallable); - - inOrder.verify(lifecycle, times(2)).stoppedOrClosed(); - inOrder.verifyNoMoreInteractions(); - } -} diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 49ddd3f1cc44..769cd2ce8105 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ 
b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.discovery; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.LeaderHeartbeatService; @@ -16,6 +15,7 @@ import org.elasticsearch.cluster.coordination.Reconfigurator; import org.elasticsearch.cluster.coordination.StatefulPreVoteCollector; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; @@ -75,7 +75,7 @@ default Map> getSeedHostProviders( public void setupDummyServices() { transportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), mock(ThreadPool.class), null diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java index f6fe14fceb92..7583fca8c59f 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java @@ -96,7 +96,7 @@ public void testUpgradesLegitimateVersions() { v -> v.after(Version.CURRENT) || v.before(Version.CURRENT.minimumCompatibilityVersion()), this::randomVersion ), - IndexVersion.CURRENT + IndexVersion.current() ).upgradeToCurrentVersion(); assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT)); assertThat(nodeMetadata.nodeId(), equalTo(nodeId)); @@ -107,7 +107,7 @@ public void testUpgradesMissingVersion() { final IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new NodeMetadata(nodeId, Version.V_EMPTY, 
IndexVersion.CURRENT).upgradeToCurrentVersion() + () -> new NodeMetadata(nodeId, Version.V_EMPTY, IndexVersion.current()).upgradeToCurrentVersion() ); assertThat( illegalStateException.getMessage(), @@ -118,7 +118,7 @@ public void testUpgradesMissingVersion() { public void testDoesNotUpgradeFutureVersion() { final IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new NodeMetadata(randomAlphaOfLength(10), tooNewVersion(), IndexVersion.CURRENT).upgradeToCurrentVersion() + () -> new NodeMetadata(randomAlphaOfLength(10), tooNewVersion(), IndexVersion.current()).upgradeToCurrentVersion() ); assertThat( illegalStateException.getMessage(), @@ -129,7 +129,7 @@ public void testDoesNotUpgradeFutureVersion() { public void testDoesNotUpgradeAncientVersion() { final IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new NodeMetadata(randomAlphaOfLength(10), tooOldVersion(), IndexVersion.CURRENT).upgradeToCurrentVersion() + () -> new NodeMetadata(randomAlphaOfLength(10), tooOldVersion(), IndexVersion.current()).upgradeToCurrentVersion() ); assertThat( illegalStateException.getMessage(), @@ -150,7 +150,7 @@ public void testUpgradeMarksPreviousVersion() { final String nodeId = randomAlphaOfLength(10); final Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0); - final NodeMetadata nodeMetadata = new NodeMetadata(nodeId, version, IndexVersion.CURRENT).upgradeToCurrentVersion(); + final NodeMetadata nodeMetadata = new NodeMetadata(nodeId, version, IndexVersion.current()).upgradeToCurrentVersion(); assertThat(nodeMetadata.nodeVersion(), equalTo(Version.CURRENT)); assertThat(nodeMetadata.previousNodeVersion(), equalTo(version)); } @@ -160,7 +160,7 @@ public static Version tooNewVersion() { } public static IndexVersion tooNewIndexVersion() { - return IndexVersion.fromId(between(IndexVersion.CURRENT.id() + 1, 99999999)); + 
return IndexVersion.fromId(between(IndexVersion.current().id() + 1, 99999999)); } public static Version tooOldVersion() { diff --git a/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java index 616c24a9b593..e9e8fb70d9ea 100644 --- a/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java @@ -19,24 +19,15 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.SettingUpgrader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; -import java.util.Collections; -import java.util.Set; -import java.util.function.BiConsumer; import java.util.function.Function; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK; import static org.elasticsearch.gateway.ClusterStateUpdaters.addStateNotRecoveredBlock; @@ -46,7 +37,6 @@ import static org.elasticsearch.gateway.ClusterStateUpdaters.removeStateNotRecoveredBlock; import static org.elasticsearch.gateway.ClusterStateUpdaters.setLocalNode; import static org.elasticsearch.gateway.ClusterStateUpdaters.updateRoutingTable; -import static org.elasticsearch.gateway.ClusterStateUpdaters.upgradeAndArchiveUnknownOrInvalidSettings; import static 
org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -54,58 +44,6 @@ public class ClusterStateUpdatersTests extends ESTestCase { - public void testUpgradePersistentSettings() { - runUpgradeSettings(Metadata.Builder::persistentSettings, Metadata::persistentSettings); - } - - public void testUpgradeTransientSettings() { - runUpgradeSettings(Metadata.Builder::transientSettings, Metadata::transientSettings); - } - - private void runUpgradeSettings( - final BiConsumer applySettingsToBuilder, - final Function metadataSettings - ) { - final Setting oldSetting = Setting.simpleString("foo.old", Setting.Property.Dynamic, Setting.Property.NodeScope); - final Setting newSetting = Setting.simpleString("foo.new", Setting.Property.Dynamic, Setting.Property.NodeScope); - final Set> settingsSet = Stream.concat( - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), - Stream.of(oldSetting, newSetting) - ).collect(Collectors.toSet()); - final ClusterSettings clusterSettings = new ClusterSettings( - Settings.EMPTY, - settingsSet, - Collections.singleton(new SettingUpgrader() { - - @Override - public Setting getSetting() { - return oldSetting; - } - - @Override - public String getKey(final String key) { - return "foo.new"; - } - - @Override - public String getValue(final String value) { - return "new." 
+ value; - } - - }) - ); - final ClusterService clusterService = new ClusterService(Settings.EMPTY, clusterSettings, null, (TaskManager) null); - final Metadata.Builder builder = Metadata.builder(); - final Settings settings = Settings.builder().put("foo.old", randomAlphaOfLength(8)).build(); - applySettingsToBuilder.accept(builder, settings); - final ClusterState initialState = ClusterState.builder(clusterService.getClusterName()).metadata(builder.build()).build(); - final ClusterState state = upgradeAndArchiveUnknownOrInvalidSettings(initialState, clusterService.getClusterSettings()); - - assertFalse(oldSetting.exists(metadataSettings.apply(state.metadata()))); - assertTrue(newSetting.exists(metadataSettings.apply(state.metadata()))); - assertThat(newSetting.get(metadataSettings.apply(state.metadata())), equalTo("new." + oldSetting.get(settings))); - } - private IndexMetadata createIndexMetadata(final String name, final Settings settings) { return IndexMetadata.builder(name) .settings(indexSettings(Version.CURRENT, 1, 0).put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).put(settings)) diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index b8ae670961d2..3b5cb5b50dee 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.plugins.ClusterCoordinationPlugin; import org.elasticsearch.plugins.MetadataUpgrader; import org.elasticsearch.test.ESTestCase; @@ -187,7 +188,7 @@ private static class MockIndexMetadataVerifier extends IndexMetadataVerifier { } @Override - public IndexMetadata 
verifyIndexMetadata(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) { + public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { return upgrade ? IndexMetadata.builder(indexMetadata).build() : indexMetadata; } } diff --git a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java index c1ea3f04170d..a9d8fb0f09c0 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java @@ -1493,8 +1493,8 @@ public void testOldestIndexVersionIsCorrectlySerialized() throws IOException { final IndexVersion[] indexVersions = new IndexVersion[] { oldVersion, - IndexVersion.CURRENT, - IndexVersion.fromId(IndexVersion.CURRENT.id() + 1) }; + IndexVersion.current(), + IndexVersion.fromId(IndexVersion.current().id() + 1) }; int lastIndexNum = randomIntBetween(9, 50); Metadata.Builder b = Metadata.builder(); for (IndexVersion indexVersion : indexVersions) { diff --git a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index c6cba4c114f6..b76e41b067b2 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Nullable; import org.elasticsearch.env.ShardLockObtainFailedException; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; @@ -475,7 +476,7 @@ private RoutingAllocation 
getRestoreRoutingAllocation(AllocationDeciders allocat new SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId(shardId.getIndexName(), UUIDs.randomBase64UUID(random())) ) ) diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index d8ab4d32c65b..7c8177b44582 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -52,7 +52,6 @@ import org.junit.After; import org.junit.Assert; import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.net.InetSocketAddress; import java.net.UnknownHostException; @@ -66,7 +65,9 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.concurrent.BlockingDeque; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.TimeUnit; import static java.net.InetAddress.getByName; @@ -79,17 +80,13 @@ import static org.elasticsearch.test.LambdaMatchers.transformedMatch; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; public class AbstractHttpServerTransportTests extends ESTestCase { @@ -97,6 +94,9 @@ public class AbstractHttpServerTransportTests extends ESTestCase { private 
ThreadPool threadPool; private Recycler recycler; + private static final int LONG_GRACE_PERIOD_MS = 20_000; + private static final int SHORT_GRACE_PERIOD_MS = 1; + @Before public void setup() throws Exception { networkService = new NetworkService(Collections.emptyList()); @@ -422,7 +422,7 @@ public void testHandlingCompatibleVersionParsingErrors() { headers ); - transport.incomingRequest(fakeHttpRequest, null); + transport.incomingRequest(fakeHttpRequest, new TestHttpChannel()); } } @@ -444,7 +444,7 @@ public void testIncorrectHeaderHandling() { headers ); - transport.incomingRequest(fakeHttpRequest, null); + transport.incomingRequest(fakeHttpRequest, new TestHttpChannel()); } try (AbstractHttpServerTransport transport = failureAssertingtHttpServerTransport(clusterSettings, Set.of("Content-Type"))) { Map> headers = new HashMap<>(); @@ -460,7 +460,7 @@ public void testIncorrectHeaderHandling() { headers ); - transport.incomingRequest(fakeHttpRequest, null); + transport.incomingRequest(fakeHttpRequest, new TestHttpChannel()); } } @@ -624,6 +624,7 @@ public HttpStats stats() { .build(); try (var httpChannel = fakeRestRequest.getHttpChannel()) { + transport.serverAcceptedChannel(httpChannel); transport.incomingRequest(fakeRestRequest.getHttpRequest(), httpChannel); } @@ -722,6 +723,7 @@ public HttpStats stats() { .withPath(path) .withHeaders(Collections.singletonMap(Task.X_OPAQUE_ID_HTTP_HEADER, Collections.singletonList(opaqueId))) .build(); + transport.serverAcceptedChannel(fakeRestRequest.getHttpChannel()); transport.incomingRequest(fakeRestRequest.getHttpRequest(), fakeRestRequest.getHttpChannel()); mockAppender.assertAllExpectationsMatched(); } finally { @@ -904,52 +906,22 @@ protected void stopInternal() {} } } - @SuppressWarnings("unchecked") - public void testSetGracefulClose() { - try (AbstractHttpServerTransport transport = new TestHttpServerTransport(Settings.EMPTY)) { - final TestHttpRequest httpRequest = new 
TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); - - HttpChannel httpChannel = mock(HttpChannel.class); - transport.incomingRequest(httpRequest, httpChannel); - - var response = ArgumentCaptor.forClass(TestHttpResponse.class); - var listener = ArgumentCaptor.forClass(ActionListener.class); - verify(httpChannel).sendResponse(response.capture(), listener.capture()); - - listener.getValue().onResponse(null); - assertThat(response.getValue().containsHeader(CONNECTION), is(false)); - verify(httpChannel, never()).close(); - - httpChannel = mock(HttpChannel.class); - transport.gracefullyCloseConnections(); - transport.incomingRequest(httpRequest, httpChannel); - verify(httpChannel).sendResponse(response.capture(), listener.capture()); - - listener.getValue().onResponse(null); - assertThat(response.getValue().headers().get(CONNECTION), containsInAnyOrder(DefaultRestChannel.CLOSE)); - verify(httpChannel).close(); - } - } - public void testStopDoesntWaitIfGraceIsZero() { - try (TestHttpServerTransport transport = new TestHttpServerTransport(Settings.EMPTY)) { - transport.bindServer(); - final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); - + try (var noWait = LogExpectation.unexpectWait(); var transport = new TestHttpServerTransport(Settings.EMPTY)) { TestHttpChannel httpChannel = new TestHttpChannel(); transport.serverAcceptedChannel(httpChannel); - transport.incomingRequest(httpRequest, httpChannel); + transport.incomingRequest(testHttpRequest(), httpChannel); transport.doStop(); assertFalse(transport.testHttpServerChannel.isOpen()); assertFalse(httpChannel.isOpen()); + noWait.assertExpectationsMatched(); } } public void testStopWorksWithNoOpenRequests() { - try (TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(1))) { - transport.bindServer(); - + var grace = SHORT_GRACE_PERIOD_MS; + try (var noWait = LogExpectation.unexpectedTimeout(grace); var 
transport = new TestHttpServerTransport(gracePeriod(grace))) { final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/") { @Override public Map> getHeaders() { @@ -965,122 +937,104 @@ public Map> getHeaders() { // TestHttpChannel will throw if closed twice, so this ensures close is not called. transport.doStop(); assertFalse(transport.testHttpServerChannel.isOpen()); + + noWait.assertExpectationsMatched(); } } - public void testStopForceClosesConnection() { - final Logger mockLogger = LogManager.getLogger(AbstractHttpServerTransport.class); - Loggers.setLevel(mockLogger, Level.WARN); - final MockLogAppender appender = new MockLogAppender(); - try (TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(10))) { - Loggers.addAppender(mockLogger, appender); - appender.start(); - - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "message", - AbstractHttpServerTransport.class.getName(), - Level.WARN, - "timed out while waiting [10]ms for clients to close connections" - ) - ); + public void testStopClosesIdleConnectionImmediately() { + var grace = SHORT_GRACE_PERIOD_MS; + try ( + var noTimeout = LogExpectation.unexpectedTimeout(grace); + TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(grace)) + ) { - transport.bindServer(); - final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); TestHttpChannel httpChannel = new TestHttpChannel(); transport.serverAcceptedChannel(httpChannel); - transport.incomingRequest(httpRequest, httpChannel); - // idle connection + + transport.incomingRequest(testHttpRequest(), httpChannel); + // channel now idle + assertTrue(httpChannel.isOpen()); transport.doStop(); assertFalse(httpChannel.isOpen()); assertFalse(transport.testHttpServerChannel.isOpen()); + // ensure we timed out waiting for connections to close naturally - 
appender.assertAllExpectationsMatched(); - } finally { - appender.stop(); - Loggers.removeAppender(mockLogger, appender); + noTimeout.assertExpectationsMatched(); } } public void testStopForceClosesConnectionDuringRequest() throws Exception { - final Logger mockLogger = LogManager.getLogger(AbstractHttpServerTransport.class); - Loggers.setLevel(mockLogger, Level.WARN); - final MockLogAppender appender = new MockLogAppender(); - final var inDispatch = new CountDownLatch(1); - try (TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(10), new HttpServerTransport.Dispatcher() { - @Override - public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { - inDispatch.countDown(); - } - - @Override - public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { - channel.sendResponse(emptyResponse(RestStatus.BAD_REQUEST)); - } - })) { - Loggers.addAppender(mockLogger, appender); - appender.start(); + var grace = SHORT_GRACE_PERIOD_MS; + TestHttpChannel httpChannel = new TestHttpChannel(); + var doneWithRequest = new CountDownLatch(1); + try ( + var timeout = LogExpectation.expectTimeout(grace); + TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(grace)) + ) { - appender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "message", - AbstractHttpServerTransport.class.getName(), - Level.WARN, - "timed out while waiting [10]ms for clients to close connections" - ) - ); + httpChannel.blockSendResponse(); + var inResponse = httpChannel.notifyInSendResponse(); - transport.bindServer(); - final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); - TestHttpChannel httpChannel = new TestHttpChannel(); transport.serverAcceptedChannel(httpChannel); - new Thread( - () -> transport.incomingRequest(httpRequest, httpChannel), - "testStopForceClosesConnectionDuringRequest -> incomingRequest" 
- ).start(); - inDispatch.await(); + new Thread(() -> { + transport.incomingRequest(testHttpRequest(), httpChannel); + doneWithRequest.countDown(); + }, "testStopForceClosesConnectionDuringRequest -> incomingRequest").start(); + + inResponse.await(); + assertTrue(httpChannel.isOpen()); transport.doStop(); + assertFalse(httpChannel.isOpen()); assertFalse(transport.testHttpServerChannel.isOpen()); - assertThat(httpChannel.responses, hasSize(0)); + assertTrue(httpChannel.noResponses()); + // ensure we timed out waiting for connections to close naturally - appender.assertAllExpectationsMatched(); + timeout.assertExpectationsMatched(); } finally { - appender.stop(); - Loggers.removeAppender(mockLogger, appender); + // unblock request thread + httpChannel.allowSendResponse(); + doneWithRequest.countDown(); } } - public void testStopClosesChannelAfterRequest() { - try (TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(100))) { - transport.bindServer(); + public void testStopClosesChannelAfterRequest() throws Exception { + var grace = LONG_GRACE_PERIOD_MS; + try (var noTimeout = LogExpectation.unexpectedTimeout(grace); var transport = new TestHttpServerTransport(gracePeriod(grace))) { TestHttpChannel httpChannel = new TestHttpChannel(); transport.serverAcceptedChannel(httpChannel); - transport.incomingRequest(new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"), httpChannel); + transport.incomingRequest(testHttpRequest(), httpChannel); TestHttpChannel idleChannel = new TestHttpChannel(); transport.serverAcceptedChannel(idleChannel); - transport.incomingRequest(new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"), idleChannel); + transport.incomingRequest(testHttpRequest(), idleChannel); CountDownLatch stopped = new CountDownLatch(1); + var inSendResponse = httpChannel.notifyInSendResponse(); + httpChannel.blockSendResponse(); + + // one last request, should cause httpChannel to close 
after the request once we start shutting down. + new Thread(() -> transport.incomingRequest(testHttpRequest(), httpChannel), "testStopClosesChannelAfterRequest last request") + .start(); + + inSendResponse.await(); + new Thread(() -> { transport.doStop(); stopped.countDown(); - }).start(); + }, "testStopClosesChannelAfterRequest stopping transport").start(); - try { - assertTrue(transport.gracefullyCloseCalled.await(10, TimeUnit.SECONDS)); - } catch (InterruptedException e) { - fail("server never called grace period"); - } + // wait until we are shutting down + assertBusy(() -> assertFalse(transport.isAcceptingConnections())); + httpChannel.allowSendResponse(); - // one last request, should cause httpChannel to close naturally now that we've set grace period - transport.incomingRequest(new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"), httpChannel); - assertFalse(httpChannel.isOpen()); + // wait for channel to close + assertBusy(() -> assertFalse(httpChannel.isOpen())); try { assertTrue(stopped.await(10, TimeUnit.SECONDS)); @@ -1092,42 +1046,39 @@ public void testStopClosesChannelAfterRequest() { assertFalse(idleChannel.isOpen()); assertThat(httpChannel.responses, hasSize(2)); - HttpResponse first = httpChannel.responses.get(0); - HttpResponse last = httpChannel.responses.get(1); - assertFalse(first.containsHeader(CONNECTION)); - assertTrue(last.containsHeader(CONNECTION)); - assertThat(last, instanceOf(TestHttpResponse.class)); - assertThat(((TestHttpResponse) last).headers().get(CONNECTION).get(0), equalTo(CLOSE)); + // should have closed naturally without having to wait + noTimeout.assertExpectationsMatched(); } } - public void testForceClosesOpenChannels() { - try (TestHttpServerTransport transport = new TestHttpServerTransport(gracePeriod(100))) { - transport.bindServer(); + public void testForceClosesOpenChannels() throws Exception { + var grace = 100; // this test waits for the entire grace, so try to keep it short + 
TestHttpChannel httpChannel = new TestHttpChannel(); + var doneWithRequest = new CountDownLatch(1); + try (var timeout = LogExpectation.expectTimeout(grace); var transport = new TestHttpServerTransport(gracePeriod(grace))) { - TestHttpChannel httpChannel = new TestHttpChannel(true); transport.serverAcceptedChannel(httpChannel); - transport.incomingRequest(new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"), httpChannel); + transport.incomingRequest(testHttpRequest(), httpChannel); CountDownLatch stopped = new CountDownLatch(1); + var inResponse = httpChannel.notifyInSendResponse(); + httpChannel.blockSendResponse(); + new Thread(() -> { - try { - assertTrue(transport.gracefullyCloseCalled.await(100, TimeUnit.MILLISECONDS)); - } catch (InterruptedException e) { - fail("server never called grace period"); - } - // one last request, will attempt to close naturally, but we are blocking it - transport.incomingRequest(new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"), httpChannel); + transport.incomingRequest(testHttpRequest(), httpChannel); + doneWithRequest.countDown(); }).start(); + inResponse.await(); + new Thread(() -> { transport.doStop(); stopped.countDown(); }).start(); try { - assertTrue(stopped.await(10, TimeUnit.SECONDS)); + assertTrue(stopped.await(2 * LONG_GRACE_PERIOD_MS, TimeUnit.MILLISECONDS)); } catch (InterruptedException e) { fail("server never stopped"); } @@ -1135,13 +1086,15 @@ public void testForceClosesOpenChannels() { assertFalse(transport.testHttpServerChannel.isOpen()); assertFalse(httpChannel.isOpen()); - assertThat(httpChannel.responses, hasSize(2)); - HttpResponse first = httpChannel.responses.get(0); - HttpResponse last = httpChannel.responses.get(1); - assertFalse(first.containsHeader(CONNECTION)); - assertTrue(last.containsHeader(CONNECTION)); - assertThat(last, instanceOf(TestHttpResponse.class)); - assertThat(((TestHttpResponse) last).headers().get(CONNECTION).get(0), 
equalTo(CLOSE)); + HttpResponse first = httpChannel.getResponse(); + assertTrue(httpChannel.noResponses()); // never sent the second response + assertThat(first, instanceOf(TestHttpResponse.class)); + + timeout.assertExpectationsMatched(); + } finally { + // cleanup thread + httpChannel.allowSendResponse(); + doneWithRequest.await(); } } @@ -1193,8 +1146,7 @@ public Collection getRestHeaders() { } private class TestHttpServerTransport extends AbstractHttpServerTransport { - public TestHttpChannel testHttpServerChannel = new TestHttpChannel(false); - public CountDownLatch gracefullyCloseCalled = new CountDownLatch(1); + public TestHttpChannel testHttpServerChannel = new TestHttpChannel(); TestHttpServerTransport(Settings settings, HttpServerTransport.Dispatcher dispatcher) { super( @@ -1207,6 +1159,7 @@ private class TestHttpServerTransport extends AbstractHttpServerTransport { new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), Tracer.NOOP ); + bindServer(); } TestHttpServerTransport(Settings settings) { @@ -1223,12 +1176,6 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, }); } - @Override - void gracefullyCloseConnections() { - super.gracefullyCloseConnections(); - gracefullyCloseCalled.countDown(); - } - @Override protected HttpServerChannel bind(InetSocketAddress hostAddress) { testHttpServerChannel.setLocalAddress(hostAddress); @@ -1244,25 +1191,80 @@ protected void doStart() { protected void stopInternal() {} } + private Settings gracePeriod(int ms) { + return Settings.builder().put(SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD.getKey(), new TimeValue(ms)).build(); + } + private static class TestHttpChannel implements HttpChannel, HttpServerChannel { private boolean open = true; - private int numCloses = 0; - private final CountDownLatch closeLatch; private ActionListener closeListener; private InetSocketAddress localAddress; - public List responses = new ArrayList<>(); + private final BlockingDeque 
responses = new LinkedBlockingDeque<>(); + + private CountDownLatch notifySendResponse = null; + private CountDownLatch blockSendResponse = null; + + public CountDownLatch notifyInSendResponse() { + synchronized (this) { + assert notifySendResponse == null : "already notifying"; + notifySendResponse = new CountDownLatch(1); + return notifySendResponse; + } + } + + public synchronized void blockSendResponse() { + synchronized (this) { + assert blockSendResponse == null : "blockSendResponse already set"; + blockSendResponse = new CountDownLatch(1); + } + } + + public synchronized void allowSendResponse() { + synchronized (this) { + assert blockSendResponse != null : "blockSendResponse null, no need to allow"; + blockSendResponse.countDown(); + } + } - TestHttpChannel() { - this(false); + public boolean noResponses() { + return responses.peek() == null; } - TestHttpChannel(boolean blockFirstClose) { - closeLatch = blockFirstClose ? new CountDownLatch(1) : null; + public HttpResponse getResponse() { + try { + return responses.takeFirst(); + } catch (InterruptedException e) { + fail("interrupted"); + } + // unreachable + return null; } @Override public void sendResponse(HttpResponse response, ActionListener listener) { + CountDownLatch notify; + CountDownLatch blockSend; + synchronized (this) { + notify = notifySendResponse; + blockSend = blockSendResponse; + } + if (notify != null) { + notify.countDown(); + synchronized (this) { + notifySendResponse = null; + } + } + if (blockSend != null) { + try { + blockSend.await(); + synchronized (this) { + blockSendResponse = null; + } + } catch (InterruptedException e) { + fail("interrupted"); + } + } responses.add(response); listener.onResponse(null); } @@ -1283,26 +1285,12 @@ public InetSocketAddress getRemoteAddress() { @Override public void close() { - if (closeLatch != null) { - boolean waitForever; - synchronized (this) { - waitForever = numCloses == 0; - numCloses++; - } - if (waitForever) { - try { - if 
(closeLatch.await(1, TimeUnit.SECONDS) == false) { - return; - } - } catch (InterruptedException ie) { - throw new RuntimeException(ie); - } + synchronized (this) { + if (open == false) { + throw new IllegalStateException("channel already closed!"); } + open = false; } - if (open == false) { - throw new IllegalStateException("channel already closed!"); - } - open = false; if (closeListener != null) { closeListener.onResponse(null); } @@ -1326,7 +1314,75 @@ public void addCloseListener(ActionListener listener) { } } - private Settings gracePeriod(int ms) { - return Settings.builder().put(SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD.getKey(), new TimeValue(ms)).build(); + private static class LogExpectation implements AutoCloseable { + private final Logger mockLogger; + private final MockLogAppender appender; + private boolean checked = false; + private final int grace; + + private LogExpectation(int grace) { + mockLogger = LogManager.getLogger(AbstractHttpServerTransport.class); + Loggers.setLevel(mockLogger, Level.DEBUG); + appender = new MockLogAppender(); + Loggers.addAppender(mockLogger, appender); + appender.start(); + this.grace = grace; + } + + public static LogExpectation expectTimeout(int grace) { + return new LogExpectation(grace).timedOut(true).wait(true); + } + + public static LogExpectation unexpectedTimeout(int grace) { + return new LogExpectation(grace).timedOut(false).wait(true); + } + + public static LogExpectation unexpectWait() { + return new LogExpectation(0).wait(false); + } + + private LogExpectation timedOut(boolean expected) { + var message = "timed out while waiting [" + grace + "]ms for clients to close connections"; + var name = "message"; + var logger = AbstractHttpServerTransport.class.getName(); + var level = Level.WARN; + if (expected) { + appender.addExpectation(new MockLogAppender.SeenEventExpectation(name, logger, level, message)); + } else { + appender.addExpectation(new MockLogAppender.UnseenEventExpectation(name, logger, level, 
message)); + } + return this; + } + + private LogExpectation wait(boolean expected) { + var message = "closing all client connections immediately"; + var name = "message"; + var logger = AbstractHttpServerTransport.class.getName(); + var level = Level.DEBUG; + if (expected) { + appender.addExpectation(new MockLogAppender.UnseenEventExpectation(name, logger, level, message)); + } else { + appender.addExpectation(new MockLogAppender.SeenEventExpectation(name, logger, level, message)); + } + return this; + } + + public void assertExpectationsMatched() { + appender.assertAllExpectationsMatched(); + checked = true; + } + + @Override + public void close() { + appender.stop(); + Loggers.removeAppender(mockLogger, appender); + if (checked == false) { + fail("did not check expectations matched in TimedOutLogExpectation"); + } + } + } + + private TestHttpRequest testHttpRequest() { + return new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); } } diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java index d050c2432025..dc86735737a3 100644 --- a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java +++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java @@ -172,8 +172,7 @@ public void testHeadersSet() { threadPool.getThreadContext(), CorsHandler.fromSettings(settings), httpTracer, - tracer, - false + tracer ); RestResponse resp = testRestResponse(); final String customHeader = "custom-header"; @@ -193,35 +192,6 @@ public void testHeadersSet() { assertEquals(resp.contentType(), headers.get(DefaultRestChannel.CONTENT_TYPE).get(0)); } - public void testCloseConnection() { - Settings settings = Settings.builder().build(); - final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); - final RestRequest request = 
RestRequest.request(parserConfig(), httpRequest, httpChannel); - HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); - // send a response - DefaultRestChannel channel = new DefaultRestChannel( - httpChannel, - httpRequest, - request, - bigArrays, - handlingSettings, - threadPool.getThreadContext(), - CorsHandler.fromSettings(settings), - httpTracer, - tracer, - true - ); - - RestResponse resp = testRestResponse(); - channel.sendResponse(resp); - // inspect what was written - ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(TestHttpResponse.class); - verify(httpChannel).sendResponse(responseCaptor.capture(), any()); - TestHttpResponse httpResponse = responseCaptor.getValue(); - Map> headers = httpResponse.headers(); - assertThat(headers.get(DefaultRestChannel.CONNECTION), containsInAnyOrder(DefaultRestChannel.CLOSE)); - } - public void testNormallyNoConnectionClose() { Settings settings = Settings.builder().build(); final TestHttpRequest httpRequest = new TestHttpRequest(HttpRequest.HttpVersion.HTTP_1_1, RestRequest.Method.GET, "/"); @@ -237,8 +207,7 @@ public void testNormallyNoConnectionClose() { threadPool.getThreadContext(), CorsHandler.fromSettings(settings), httpTracer, - tracer, - false + tracer ); RestResponse resp = testRestResponse(); @@ -269,8 +238,7 @@ public void testCookiesSet() { threadPool.getThreadContext(), CorsHandler.fromSettings(settings), httpTracer, - tracer, - false + tracer ); channel.sendResponse(testRestResponse()); @@ -299,8 +267,7 @@ public void testReleaseInListener() throws IOException { threadPool.getThreadContext(), CorsHandler.fromSettings(settings), httpTracer, - tracer, - false + tracer ); final RestResponse response = new RestResponse( RestStatus.INTERNAL_SERVER_ERROR, @@ -368,8 +335,7 @@ public void testConnectionClose() throws Exception { threadPool.getThreadContext(), CorsHandler.fromSettings(settings), httpTracer, - tracer, - false + tracer ); channel.sendResponse(testRestResponse()); 
Class> listenerClass = (Class>) (Class) ActionListener.class; @@ -401,8 +367,7 @@ public void testResponseHeadersFiltering() { threadPool.getThreadContext(), CorsHandler.fromSettings(Settings.EMPTY), httpTracer, - tracer, - false + tracer ); doAnswer(invocationOnMock -> { ActionListener listener = invocationOnMock.getArgument(1); @@ -449,8 +414,7 @@ public RestRequest.Method method() { threadPool.getThreadContext(), CorsHandler.fromSettings(Settings.EMPTY), httpTracer, - tracer, - false + tracer ); // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released @@ -497,8 +461,7 @@ public HttpResponse createResponse(RestStatus status, BytesReference content) { threadPool.getThreadContext(), CorsHandler.fromSettings(Settings.EMPTY), httpTracer, - tracer, - false + tracer ); // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released @@ -547,8 +510,7 @@ public void testHandleHeadRequest() { threadPool.getThreadContext(), CorsHandler.fromSettings(Settings.EMPTY), httpTracer, - tracer, - false + tracer ); ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(HttpResponse.class); { @@ -608,8 +570,7 @@ public void sendResponse(HttpResponse response, ActionListener listener) { threadPool.getThreadContext(), new CorsHandler(CorsHandler.buildConfig(Settings.EMPTY)), new HttpTracer(), - tracer, - false + tracer ); final MockLogAppender sendingResponseMockLog = new MockLogAppender(); @@ -671,8 +632,7 @@ public void sendResponse(HttpResponse response, ActionListener listener) { threadPool.getThreadContext(), new CorsHandler(CorsHandler.buildConfig(Settings.EMPTY)), new HttpTracer(), - tracer, - false + tracer ); MockLogAppender mockLogAppender = new MockLogAppender(); @@ -728,8 +688,7 @@ public HttpResponse createResponse(RestStatus status, ChunkedRestResponseBody co threadPool.getThreadContext(), CorsHandler.fromSettings(Settings.EMPTY), new HttpTracer(), - tracer, - 
false + tracer ); var responseBody = new BytesArray(randomUnicodeOfLengthBetween(1, 100).getBytes(StandardCharsets.UTF_8)); @@ -799,8 +758,7 @@ private TestHttpResponse executeRequest(final Settings settings, final String or threadPool.getThreadContext(), new CorsHandler(CorsHandler.buildConfig(settings)), httpTracer, - tracer, - false + tracer ); channel.sendResponse(testRestResponse()); diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index 284c3d246ca4..fad2286342bf 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -167,7 +167,7 @@ public void testSettingsConsistency() { "index", Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, version.id()) - .put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, IndexVersion.CURRENT.id()) + .put(IndexMetadata.SETTING_VERSION_COMPATIBILITY, IndexVersion.current().id()) .put("index.test.setting.int", 42) .build() ) diff --git a/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java b/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java index bee5f7761e82..46d45964b2c8 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java @@ -130,9 +130,9 @@ public void testDefinedConstants() throws IllegalAccessException { public void testMin() { assertEquals( IndexVersionUtils.getPreviousVersion(), - IndexVersion.min(IndexVersion.CURRENT, IndexVersionUtils.getPreviousVersion()) + IndexVersion.min(IndexVersion.current(), IndexVersionUtils.getPreviousVersion()) ); - assertEquals(IndexVersion.fromId(1_01_01_99), IndexVersion.min(IndexVersion.fromId(1_01_01_99), IndexVersion.CURRENT)); + assertEquals(IndexVersion.fromId(1_01_01_99), IndexVersion.min(IndexVersion.fromId(1_01_01_99), 
IndexVersion.current())); IndexVersion version = IndexVersionUtils.randomVersion(); IndexVersion version1 = IndexVersionUtils.randomVersion(); if (version.id() <= version1.id()) { @@ -143,8 +143,8 @@ public void testMin() { } public void testMax() { - assertEquals(IndexVersion.CURRENT, IndexVersion.max(IndexVersion.CURRENT, IndexVersionUtils.getPreviousVersion())); - assertEquals(IndexVersion.CURRENT, IndexVersion.max(IndexVersion.fromId(1_01_01_99), IndexVersion.CURRENT)); + assertEquals(IndexVersion.current(), IndexVersion.max(IndexVersion.current(), IndexVersionUtils.getPreviousVersion())); + assertEquals(IndexVersion.current(), IndexVersion.max(IndexVersion.fromId(1_01_01_99), IndexVersion.current())); IndexVersion version = IndexVersionUtils.randomVersion(); IndexVersion version1 = IndexVersionUtils.randomVersion(); if (version.id() >= version1.id()) { @@ -155,8 +155,8 @@ public void testMax() { } public void testVersionConstantPresent() { - Set ignore = Set.of(IndexVersion.ZERO, IndexVersion.CURRENT, IndexVersion.MINIMUM_COMPATIBLE); - assertThat(IndexVersion.CURRENT, sameInstance(IndexVersion.fromId(IndexVersion.CURRENT.id()))); + Set ignore = Set.of(IndexVersion.ZERO, IndexVersion.current(), IndexVersion.MINIMUM_COMPATIBLE); + assertThat(IndexVersion.current(), sameInstance(IndexVersion.fromId(IndexVersion.current().id()))); final int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { IndexVersion version = IndexVersionUtils.randomVersion(ignore); @@ -166,7 +166,7 @@ public void testVersionConstantPresent() { } public void testCURRENTIsLatest() { - assertThat(Collections.max(IndexVersion.getAllVersions()), is(IndexVersion.CURRENT)); + assertThat(Collections.max(IndexVersion.getAllVersions()), is(IndexVersion.current())); } public void testToString() { diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 
aa5045f8648b..fdb6072cc42d 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -6734,7 +6734,7 @@ public long softUpdateDocuments(Term term, Iterable userData = new HashMap<>(store.readLastCommittedSegmentsInfo().userData); userData.remove(Engine.MIN_RETAINED_SEQNO); IndexWriterConfig indexWriterConfig = new IndexWriterConfig(null).setOpenMode(IndexWriterConfig.OpenMode.APPEND) - .setIndexCreatedVersionMajor(IndexVersion.CURRENT.luceneVersion().major) + .setIndexCreatedVersionMajor(IndexVersion.current().luceneVersion().major) .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setCommitOnClose(false) .setMergePolicy(NoMergePolicy.INSTANCE); diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java index be83151b3653..8ed162f8cda8 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java @@ -90,7 +90,7 @@ public void testBasics() throws Exception { false, randomBoolean(), randomBoolean(), - IndexVersion.CURRENT + IndexVersion.current() ) ) { searcher = null; @@ -109,7 +109,7 @@ public void testBasics() throws Exception { true, randomBoolean(), randomBoolean(), - IndexVersion.CURRENT + IndexVersion.current() ) ) { searcher = null; @@ -134,7 +134,7 @@ public void testBasics() throws Exception { false, randomBoolean(), randomBoolean(), - IndexVersion.CURRENT + IndexVersion.current() ) ) { searcher = null; @@ -152,7 +152,7 @@ public void testBasics() throws Exception { true, randomBoolean(), randomBoolean(), - IndexVersion.CURRENT + IndexVersion.current() ) ) { searcher = null; @@ -175,7 +175,7 @@ public void testBasics() throws Exception { true, randomBoolean(), randomBoolean(), - 
IndexVersion.CURRENT + IndexVersion.current() ) ) { searcher = null; @@ -237,7 +237,7 @@ public void testSkipNonRootOfNestedDocuments() throws Exception { false, randomBoolean(), accessStats, - IndexVersion.CURRENT + IndexVersion.current() ) ) { if (accessStats) { diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 2ae7afa46121..5b81e264c382 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -88,7 +88,7 @@ public > IFD getForField(String type, String field final MapperBuilderContext context = MapperBuilderContext.root(false); if (type.equals("string")) { if (docValues) { - fieldType = new KeywordFieldMapper.Builder(fieldName, IndexVersion.CURRENT).build(context).fieldType(); + fieldType = new KeywordFieldMapper.Builder(fieldName, IndexVersion.current()).build(context).fieldType(); } else { fieldType = new TextFieldMapper.Builder(fieldName, createDefaultIndexAnalyzers()).fielddata(true) .build(context) @@ -101,7 +101,7 @@ public > IFD getForField(String type, String field ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).docValues(docValues).build(context).fieldType(); } else if (type.equals("double")) { @@ -111,7 +111,7 @@ public > IFD getForField(String type, String field ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).docValues(docValues).build(context).fieldType(); } else if (type.equals("long")) { @@ -121,7 +121,7 @@ public > IFD getForField(String type, String field ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).docValues(docValues).build(context).fieldType(); } else if (type.equals("int")) { @@ -131,7 +131,7 @@ public > IFD getForField(String type, String 
field ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).docValues(docValues).build(context).fieldType(); } else if (type.equals("short")) { @@ -141,7 +141,7 @@ public > IFD getForField(String type, String field ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).docValues(docValues).build(context).fieldType(); } else if (type.equals("byte")) { @@ -151,11 +151,11 @@ public > IFD getForField(String type, String field ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).docValues(docValues).build(context).fieldType(); } else if (type.equals("geo_point")) { - fieldType = new GeoPointFieldMapper.Builder(fieldName, ScriptCompiler.NONE, false, IndexVersion.CURRENT, null).docValues( + fieldType = new GeoPointFieldMapper.Builder(fieldName, ScriptCompiler.NONE, false, IndexVersion.current(), null).docValues( docValues ).build(context).fieldType(); } else if (type.equals("binary")) { diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java index 3165e0f4d670..4c0f36677d68 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java @@ -78,19 +78,19 @@ public void testGetForFieldDefaults() { indicesService.getCircuitBreakerService() ); MapperBuilderContext context = MapperBuilderContext.root(false); - final MappedFieldType stringMapper = new KeywordFieldMapper.Builder("string", IndexVersion.CURRENT).build(context).fieldType(); + final MappedFieldType stringMapper = new KeywordFieldMapper.Builder("string", IndexVersion.current()).build(context).fieldType(); ifdService.clear(); IndexFieldData fd = ifdService.getForField(stringMapper, FieldDataContext.noRuntimeFields("test")); assertTrue(fd instanceof 
SortedSetOrdinalsIndexFieldData); for (MappedFieldType mapper : Arrays.asList( - new NumberFieldMapper.Builder("int", BYTE, ScriptCompiler.NONE, false, true, IndexVersion.CURRENT, null).build(context) + new NumberFieldMapper.Builder("int", BYTE, ScriptCompiler.NONE, false, true, IndexVersion.current(), null).build(context) .fieldType(), - new NumberFieldMapper.Builder("int", SHORT, ScriptCompiler.NONE, false, true, IndexVersion.CURRENT, null).build(context) + new NumberFieldMapper.Builder("int", SHORT, ScriptCompiler.NONE, false, true, IndexVersion.current(), null).build(context) .fieldType(), - new NumberFieldMapper.Builder("int", INTEGER, ScriptCompiler.NONE, false, true, IndexVersion.CURRENT, null).build(context) + new NumberFieldMapper.Builder("int", INTEGER, ScriptCompiler.NONE, false, true, IndexVersion.current(), null).build(context) .fieldType(), - new NumberFieldMapper.Builder("long", LONG, ScriptCompiler.NONE, false, true, IndexVersion.CURRENT, null).build(context) + new NumberFieldMapper.Builder("long", LONG, ScriptCompiler.NONE, false, true, IndexVersion.current(), null).build(context) .fieldType() )) { ifdService.clear(); @@ -104,7 +104,7 @@ public void testGetForFieldDefaults() { ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).build(context).fieldType(); ifdService.clear(); @@ -117,7 +117,7 @@ public void testGetForFieldDefaults() { ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).build(context).fieldType(); ifdService.clear(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java index d86e5f3ffba0..5b6b7f97759d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java @@ -137,7 +137,7 @@ public double 
execute(ExplanationHolder explanation) { } }; } - }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.CURRENT)), equalTo(1)); + }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.current())), equalTo(1)); assertThat(searcher.count(new ScriptScoreQuery(new MatchAllDocsQuery(), new Script("test"), new ScoreScript.LeafFactory() { @Override public boolean needs_score() { @@ -154,7 +154,7 @@ public double execute(ExplanationHolder explanation) { } }; } - }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.CURRENT)), equalTo(1)); + }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.current())), equalTo(1)); } } } @@ -319,7 +319,7 @@ protected Query randomTermsQuery(MappedFieldType ft, SearchExecutionContext ctx) } public void testDualingQueries() throws IOException { - BooleanFieldMapper ootb = new BooleanFieldMapper.Builder("foo", ScriptCompiler.NONE, false, IndexVersion.CURRENT).build( + BooleanFieldMapper ootb = new BooleanFieldMapper.Builder("foo", ScriptCompiler.NONE, false, IndexVersion.current()).build( MapperBuilderContext.root(false) ); try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index a633c0a5e406..091412826b79 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -747,7 +747,7 @@ public void testLegacyDateFormatName() { null, mock(ScriptService.class), true, - IndexVersion.CURRENT + IndexVersion.current() ); @SuppressWarnings("unchecked") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java index d61ef913f1ff..cbc6bf27a238 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateScriptFieldTypeTests.java @@ -230,7 +230,7 @@ public double execute(ExplanationHolder explanation) { } }; } - }, searchContext.lookup(), 354.5f, "test", 0, IndexVersion.CURRENT)), equalTo(1)); + }, searchContext.lookup(), 354.5f, "test", 0, IndexVersion.current())), equalTo(1)); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java index fd1600cfa609..1d2e4fcec3ed 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java @@ -284,7 +284,7 @@ public void testMergeMetaForIndexTemplate() throws IOException { } public void testEmptyDocumentMapper() { - MapperService mapperService = createMapperService(IndexVersion.CURRENT, Settings.EMPTY, () -> false); + MapperService mapperService = createMapperService(IndexVersion.current(), Settings.EMPTY, () -> false); DocumentMapper documentMapper = DocumentMapper.createEmpty(mapperService); assertEquals("{\"_doc\":{}}", Strings.toString(documentMapper.mapping())); assertTrue(documentMapper.mappers().hasMappings()); @@ -428,7 +428,7 @@ public void testDeeplyNestedMapping() throws Exception { builders[i].endObject().endObject().endObject(); } - final MapperService mapperService = createMapperService(IndexVersion.CURRENT, Settings.EMPTY, () -> false); + final MapperService mapperService = createMapperService(IndexVersion.current(), Settings.EMPTY, () -> false); final CountDownLatch latch = new CountDownLatch(1); final Thread[] threads = new Thread[numThreads]; for (int i = 0; i < threads.length; i++) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java 
b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java index f920de76c85f..e79e1262593c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DoubleScriptFieldTypeTests.java @@ -138,7 +138,7 @@ public double execute(ExplanationHolder explanation) { } }; } - }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.CURRENT)), equalTo(1)); + }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.current())), equalTo(1)); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index 3ee34cb1fae0..3812dbf1bd9d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -331,7 +331,7 @@ public void testDynamicTemplatesForIndexTemplate() throws IOException { .endArray() .endObject() ); - MapperService mapperService = createMapperService(IndexVersion.CURRENT, Settings.EMPTY, () -> true); + MapperService mapperService = createMapperService(IndexVersion.current(), Settings.EMPTY, () -> true); mapperService.merge(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping), MapperService.MergeReason.INDEX_TEMPLATE); // There should be no update if templates are not set. 
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index 929b82592f5a..8fe8ff237059 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -158,7 +158,7 @@ public void testFieldAliasWithDifferentNestedScopes() { } private static FieldMapper createFieldMapper(String parent, String name) { - return new BooleanFieldMapper.Builder(name, ScriptCompiler.NONE, false, IndexVersion.CURRENT).build( + return new BooleanFieldMapper.Builder(name, ScriptCompiler.NONE, false, IndexVersion.current()).build( new MapperBuilderContext(parent, false) ); } @@ -168,7 +168,7 @@ private static ObjectMapper createObjectMapper(String name) { } private static NestedObjectMapper createNestedObjectMapper(String name) { - return new NestedObjectMapper.Builder(name, IndexVersion.CURRENT).build(MapperBuilderContext.root(false)); + return new NestedObjectMapper.Builder(name, IndexVersion.current()).build(MapperBuilderContext.root(false)); } private static MappingLookup createMappingLookup( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java index 221ea95413ab..bf6094473940 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java @@ -24,9 +24,13 @@ public class GeoPointFieldTypeTests extends FieldTypeTestCase { public void testFetchSourceValue() throws IOException { boolean ignoreMalformed = randomBoolean(); - MappedFieldType mapper = new GeoPointFieldMapper.Builder("field", ScriptCompiler.NONE, ignoreMalformed, IndexVersion.CURRENT, null) - 
.build(MapperBuilderContext.root(false)) - .fieldType(); + MappedFieldType mapper = new GeoPointFieldMapper.Builder( + "field", + ScriptCompiler.NONE, + ignoreMalformed, + IndexVersion.current(), + null + ).build(MapperBuilderContext.root(false)).fieldType(); Map jsonPoint = Map.of("type", "Point", "coordinates", List.of(42.0, 27.1)); Map otherJsonPoint = Map.of("type", "Point", "coordinates", List.of(30.0, 50.0)); @@ -84,7 +88,7 @@ public void testFetchSourceValue() throws IOException { } public void testFetchVectorTile() throws IOException { - MappedFieldType mapper = new GeoPointFieldMapper.Builder("field", ScriptCompiler.NONE, false, IndexVersion.CURRENT, null).build( + MappedFieldType mapper = new GeoPointFieldMapper.Builder("field", ScriptCompiler.NONE, false, IndexVersion.current(), null).build( MapperBuilderContext.root(false) ).fieldType(); final int z = randomIntBetween(1, 10); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java index 613f5bc4c398..acfa4790bdde 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointScriptFieldTypeTests.java @@ -148,7 +148,7 @@ public double execute(ExplanationHolder explanation) { } }; } - }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.CURRENT)), equalTo(1)); + }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.current())), equalTo(1)); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java index 72952fd0cfe5..af8dceffa3d5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java @@ -46,7 +46,7 @@ public void testTermsQuery() { IndexMetadata 
indexMetadata = IndexMetadata.builder(IndexMetadata.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); IndexSettings mockSettings = new IndexSettings(indexMetadata, Settings.EMPTY); Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); - Mockito.when(context.indexVersionCreated()).thenReturn(IndexVersion.CURRENT); + Mockito.when(context.indexVersionCreated()).thenReturn(IndexVersion.current()); MappedFieldType ft = new ProvidedIdFieldMapper.IdFieldType(() -> false); Query query = ft.termQuery("id", context); assertEquals(new TermInSetQuery("_id", Uid.encodeId("id")), query); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java index 64dce43ac9e5..e53edc118b43 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java @@ -193,7 +193,7 @@ public void testNullValue() throws IOException { MapperParsingException e = expectThrows( MapperParsingException.class, - () -> createDocumentMapper(IndexVersion.CURRENT, fieldMapping(b -> { + () -> createDocumentMapper(IndexVersion.current(), fieldMapping(b -> { b.field("type", "ip"); b.field("null_value", ":1"); })) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java index 8c19bcf2051f..576655fca05c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpFieldTypeTests.java @@ -348,14 +348,14 @@ public void testRangeQuery() { } public void testFetchSourceValue() throws IOException { - MappedFieldType mapper = new IpFieldMapper.Builder("field", ScriptCompiler.NONE, true, IndexVersion.CURRENT).build( + MappedFieldType mapper = new IpFieldMapper.Builder("field", ScriptCompiler.NONE, true, 
IndexVersion.current()).build( MapperBuilderContext.root(false) ).fieldType(); assertEquals(List.of("2001:db8::2:1"), fetchSourceValue(mapper, "2001:db8::2:1")); assertEquals(List.of("2001:db8::2:1"), fetchSourceValue(mapper, "2001:db8:0:0:0:0:2:1")); assertEquals(List.of("::1"), fetchSourceValue(mapper, "0:0:0:0:0:0:0:1")); - MappedFieldType nullValueMapper = new IpFieldMapper.Builder("field", ScriptCompiler.NONE, true, IndexVersion.CURRENT).nullValue( + MappedFieldType nullValueMapper = new IpFieldMapper.Builder("field", ScriptCompiler.NONE, true, IndexVersion.current()).nullValue( "2001:db8:0:0:0:0:2:7" ).build(MapperBuilderContext.root(false)).fieldType(); assertEquals(List.of("2001:db8::2:7"), fetchSourceValue(nullValueMapper, null)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java index d668ee70b81d..8acb71d5d163 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java @@ -150,7 +150,7 @@ public double execute(ExplanationHolder explanation) { } }; } - }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.CURRENT)), equalTo(1)); + }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.current())), equalTo(1)); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 2ad4f0e03c4a..94e2506d2b2a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -602,7 +602,7 @@ protected String minimalIsInvalidRoutingPathErrorMessage(Mapper mapper) { public void testDimensionInRoutingPath() throws IOException { MapperService mapper = createMapperService(fieldMapping(b 
-> b.field("type", "keyword").field("time_series_dimension", true))); IndexSettings settings = createIndexSettings( - IndexVersion.CURRENT, + IndexVersion.current(), Settings.builder() .put(IndexSettings.MODE.getKey(), "time_series") .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "field") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java index a711a6d63f1c..60a4733cb0a0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldTypeTests.java @@ -213,7 +213,7 @@ public void testNormalizeQueries() { } public void testFetchSourceValue() throws IOException { - MappedFieldType mapper = new KeywordFieldMapper.Builder("field", IndexVersion.CURRENT).build(MapperBuilderContext.root(false)) + MappedFieldType mapper = new KeywordFieldMapper.Builder("field", IndexVersion.current()).build(MapperBuilderContext.root(false)) .fieldType(); assertEquals(List.of("value"), fetchSourceValue(mapper, "value")); assertEquals(List.of("42"), fetchSourceValue(mapper, 42L)); @@ -222,7 +222,7 @@ public void testFetchSourceValue() throws IOException { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> fetchSourceValue(mapper, "value", "format")); assertEquals("Field [field] of type [keyword] doesn't support formats.", e.getMessage()); - MappedFieldType ignoreAboveMapper = new KeywordFieldMapper.Builder("field", IndexVersion.CURRENT).ignoreAbove(4) + MappedFieldType ignoreAboveMapper = new KeywordFieldMapper.Builder("field", IndexVersion.current()).ignoreAbove(4) .build(MapperBuilderContext.root(false)) .fieldType(); assertEquals(List.of(), fetchSourceValue(ignoreAboveMapper, "value")); @@ -233,13 +233,13 @@ public void testFetchSourceValue() throws IOException { "field", createIndexAnalyzers(), ScriptCompiler.NONE, - IndexVersion.CURRENT + 
IndexVersion.current() ).normalizer("lowercase").build(MapperBuilderContext.root(false)).fieldType(); assertEquals(List.of("value"), fetchSourceValue(normalizerMapper, "VALUE")); assertEquals(List.of("42"), fetchSourceValue(normalizerMapper, 42L)); assertEquals(List.of("value"), fetchSourceValue(normalizerMapper, "value")); - MappedFieldType nullValueMapper = new KeywordFieldMapper.Builder("field", IndexVersion.CURRENT).nullValue("NULL") + MappedFieldType nullValueMapper = new KeywordFieldMapper.Builder("field", IndexVersion.current()).nullValue("NULL") .build(MapperBuilderContext.root(false)) .fieldType(); assertEquals(List.of("NULL"), fetchSourceValue(nullValueMapper, null)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java index cd124dc8dcd7..3bc4bb37d52f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordScriptFieldTypeTests.java @@ -133,7 +133,7 @@ public double execute(ExplanationHolder explanation) { } }; } - }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.CURRENT)), equalTo(1)); + }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.current())), equalTo(1)); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java index 159896466808..145273b62e1e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/LongScriptFieldTypeTests.java @@ -174,7 +174,7 @@ public double execute(ExplanationHolder explanation) { } }; } - }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.CURRENT)), equalTo(1)); + }, searchContext.lookup(), 2.5f, "test", 0, IndexVersion.current())), equalTo(1)); } } 
} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 5010123d156e..4e1c34d68e60 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -32,7 +32,7 @@ public class MappingParserTests extends MapperServiceTestCase { private static MappingParser createMappingParser(Settings settings) { - return createMappingParser(settings, IndexVersion.CURRENT, TransportVersion.current()); + return createMappingParser(settings, IndexVersion.current(), TransportVersion.current()); } private static MappingParser createMappingParser(Settings settings, IndexVersion version, TransportVersion transportVersion) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsSerializationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsSerializationTests.java index 5eb1248c2681..37bde4c922c3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsSerializationTests.java @@ -37,10 +37,10 @@ public void testSorting() { sortedNames.sort(Comparator.naturalOrder()); for (String name : names) { - builder.add(new BooleanFieldMapper.Builder(name, ScriptCompiler.NONE, false, IndexVersion.CURRENT)); + builder.add(new BooleanFieldMapper.Builder(name, ScriptCompiler.NONE, false, IndexVersion.current())); } - Mapper.Builder root = new BooleanFieldMapper.Builder("root", ScriptCompiler.NONE, false, IndexVersion.CURRENT); + Mapper.Builder root = new BooleanFieldMapper.Builder("root", ScriptCompiler.NONE, false, IndexVersion.current()); FieldMapper.MultiFields multiFields = builder.build(root, MapperBuilderContext.root(false)); String serialized = Strings.toString(multiFields); diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/NestedDocumentsTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedDocumentsTests.java index da31405e66e2..cdc9ee389b5f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedDocumentsTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedDocumentsTests.java @@ -46,7 +46,7 @@ public void testSimpleNestedHierarchy() throws IOException { })); withLuceneIndex(mapperService, iw -> iw.addDocuments(doc.docs()), reader -> { - NestedDocuments nested = new NestedDocuments(mapperService.mappingLookup(), QueryBitSetProducer::new, IndexVersion.CURRENT); + NestedDocuments nested = new NestedDocuments(mapperService.mappingLookup(), QueryBitSetProducer::new, IndexVersion.current()); LeafNestedDocuments leaf = nested.getLeafNestedDocuments(reader.leaves().get(0)); assertNotNull(leaf.advance(0)); @@ -143,7 +143,7 @@ public void testMultiLevelNestedHierarchy() throws IOException { })); withLuceneIndex(mapperService, iw -> iw.addDocuments(doc.docs()), reader -> { - NestedDocuments nested = new NestedDocuments(mapperService.mappingLookup(), QueryBitSetProducer::new, IndexVersion.CURRENT); + NestedDocuments nested = new NestedDocuments(mapperService.mappingLookup(), QueryBitSetProducer::new, IndexVersion.current()); LeafNestedDocuments leaf = nested.getLeafNestedDocuments(reader.leaves().get(0)); assertNotNull(leaf.advance(0)); @@ -258,7 +258,7 @@ public void testNestedObjectWithinNonNestedObject() throws IOException { })); withLuceneIndex(mapperService, iw -> iw.addDocuments(doc.docs()), reader -> { - NestedDocuments nested = new NestedDocuments(mapperService.mappingLookup(), QueryBitSetProducer::new, IndexVersion.CURRENT); + NestedDocuments nested = new NestedDocuments(mapperService.mappingLookup(), QueryBitSetProducer::new, IndexVersion.current()); LeafNestedDocuments leaf = nested.getLeafNestedDocuments(reader.leaves().get(0)); assertNotNull(leaf.advance(0)); diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/NestedLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedLookupTests.java index c6fb7545a6df..f20516f96a07 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedLookupTests.java @@ -64,7 +64,7 @@ public void testMultiLevelParents() throws IOException { } private static NestedObjectMapper buildMapper(String name) { - return new NestedObjectMapper.Builder(name, IndexVersion.CURRENT).build(MapperBuilderContext.root(false)); + return new NestedObjectMapper.Builder(name, IndexVersion.current()).build(MapperBuilderContext.root(false)); } public void testAllParentFilters() { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index d4edeb97f842..658f2ad503b7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -1498,10 +1498,10 @@ public void testIndexTemplatesMergeIncludes() throws IOException { } public void testMergeNested() { - NestedObjectMapper firstMapper = new NestedObjectMapper.Builder("nested1", IndexVersion.CURRENT).includeInParent(true) + NestedObjectMapper firstMapper = new NestedObjectMapper.Builder("nested1", IndexVersion.current()).includeInParent(true) .includeInRoot(true) .build(MapperBuilderContext.root(false)); - NestedObjectMapper secondMapper = new NestedObjectMapper.Builder("nested1", IndexVersion.CURRENT).includeInParent(false) + NestedObjectMapper secondMapper = new NestedObjectMapper.Builder("nested1", IndexVersion.current()).includeInParent(false) .includeInRoot(true) .build(MapperBuilderContext.root(false)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java 
b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java index c01a2643e703..3c400a5b74c1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldTypeTests.java @@ -812,7 +812,7 @@ public void testFetchSourceValue() throws IOException { ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).build(MapperBuilderContext.root(false)).fieldType(); assertEquals(List.of(3), fetchSourceValue(mapper, 3.14)); @@ -825,7 +825,7 @@ public void testFetchSourceValue() throws IOException { ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).nullValue(2.71f).build(MapperBuilderContext.root(false)).fieldType(); assertEquals(List.of(2.71f), fetchSourceValue(nullValueMapper, "")); @@ -839,7 +839,7 @@ public void testFetchHalfFloatFromSource() throws IOException { ScriptCompiler.NONE, false, true, - IndexVersion.CURRENT, + IndexVersion.current(), null ).build(MapperBuilderContext.root(false)).fieldType(); /* diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index 474d61f7bd27..6c47f2055401 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -180,7 +180,7 @@ public void testMergedFieldNamesMultiFieldsWithinSubobjectsFalse() { } private static RootObjectMapper createRootSubobjectFalseLeafWithDots() { - FieldMapper.Builder fieldBuilder = new KeywordFieldMapper.Builder("host.name", IndexVersion.CURRENT); + FieldMapper.Builder fieldBuilder = new KeywordFieldMapper.Builder("host.name", IndexVersion.current()); FieldMapper fieldMapper = fieldBuilder.build(MapperBuilderContext.root(false)); assertEquals("host.name", fieldMapper.simpleName()); 
assertEquals("host.name", fieldMapper.name()); @@ -188,7 +188,7 @@ private static RootObjectMapper createRootSubobjectFalseLeafWithDots() { } private static ObjectMapper.Builder createObjectSubobjectsFalseLeafWithDots() { - KeywordFieldMapper.Builder fieldBuilder = new KeywordFieldMapper.Builder("host.name", IndexVersion.CURRENT); + KeywordFieldMapper.Builder fieldBuilder = new KeywordFieldMapper.Builder("host.name", IndexVersion.current()); KeywordFieldMapper fieldMapper = fieldBuilder.build(new MapperBuilderContext("foo.metrics", false)); assertEquals("host.name", fieldMapper.simpleName()); assertEquals("foo.metrics.host.name", fieldMapper.name()); @@ -212,7 +212,7 @@ private ObjectMapper.Builder createObjectSubobjectsFalseLeafWithMultiField() { private TextFieldMapper.Builder createTextKeywordMultiField(String name) { TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name, createDefaultIndexAnalyzers()); - builder.multiFieldsBuilder.add(new KeywordFieldMapper.Builder("keyword", IndexVersion.CURRENT)); + builder.multiFieldsBuilder.add(new KeywordFieldMapper.Builder("keyword", IndexVersion.current())); return builder; } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java index 471cebd45824..189ded90d0c1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java @@ -290,7 +290,7 @@ private static TestMapper fromMapping(String mapping, IndexVersion version, Tran } private static TestMapper fromMapping(String mapping) { - return fromMapping(mapping, IndexVersion.CURRENT, TransportVersion.current()); + return fromMapping(mapping, IndexVersion.current(), TransportVersion.current()); } private String toStringWithDefaults(ToXContent value) throws IOException { diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java index 3ae5de5ab307..cbeafd0ea404 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldAnalyzerModeTests.java @@ -67,7 +67,7 @@ public void testParseTextFieldCheckAnalyzerAnalysisMode() { Map fieldNode = new HashMap<>(); fieldNode.put("analyzer", "my_analyzer"); MappingParserContext parserContext = mock(MappingParserContext.class); - when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.CURRENT); + when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.current()); // check AnalysisMode.ALL works Map analyzers = defaultAnalyzers(); @@ -102,7 +102,7 @@ public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() { fieldNode.put("search_analyzer", "standard"); } MappingParserContext parserContext = mock(MappingParserContext.class); - when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.CURRENT); + when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.current()); // check AnalysisMode.ALL and AnalysisMode.SEARCH_TIME works Map analyzers = defaultAnalyzers(); @@ -143,7 +143,7 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { Map fieldNode = new HashMap<>(); fieldNode.put("analyzer", "my_analyzer"); MappingParserContext parserContext = mock(MappingParserContext.class); - when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.CURRENT); + when(parserContext.indexVersionCreated()).thenReturn(IndexVersion.current()); // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked if there is no search analyzer AnalysisMode mode = AnalysisMode.INDEX_TIME; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java 
b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java index 6be5861b49b9..338de51efb63 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -112,7 +112,7 @@ public void testMultiFieldWithinMultiField() throws IOException { // For indices created in 8.0 or later, we should throw an error. Map fieldNodeCopy = XContentHelper.convertToMap(BytesReference.bytes(mapping), true, mapping.contentType()).v2(); - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersion.V_8_0_0, IndexVersion.CURRENT); + IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersion.V_8_0_0, IndexVersion.current()); TransportVersion transportVersion = TransportVersionUtils.randomVersionBetween( random(), TransportVersion.V_8_0_0, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java index 2fde0e686585..0b2b3b917789 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java @@ -418,7 +418,7 @@ public void testAllDimensionsInRoutingPath() throws IOException { fieldMapping(b -> b.field("type", "flattened").field("time_series_dimensions", List.of("key1", "subfield.key2"))) ); IndexSettings settings = createIndexSettings( - IndexVersion.CURRENT, + IndexVersion.current(), Settings.builder() .put(IndexSettings.MODE.getKey(), "time_series") .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of("field.key1", "field.subfield.key2")) @@ -436,7 +436,7 @@ public void testSomeDimensionsInRoutingPath() throws IOException { ) ); IndexSettings settings = createIndexSettings( - IndexVersion.CURRENT, + IndexVersion.current(), 
Settings.builder() .put(IndexSettings.MODE.getKey(), "time_series") .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of("field.key1", "field.subfield.key2")) @@ -452,7 +452,7 @@ public void testMissingDimensionInRoutingPath() throws IOException { fieldMapping(b -> b.field("type", "flattened").field("time_series_dimensions", List.of("key1", "subfield.key2"))) ); IndexSettings settings = createIndexSettings( - IndexVersion.CURRENT, + IndexVersion.current(), Settings.builder() .put(IndexSettings.MODE.getKey(), "time_series") .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of("field.key1", "field.subfield.key2", "field.key3")) @@ -483,7 +483,7 @@ public void testRoutingPathWithKeywordsAndFlattenedFields() throws IOException { b.endObject(); })); IndexSettings settings = createIndexSettings( - IndexVersion.CURRENT, + IndexVersion.current(), Settings.builder() .put(IndexSettings.MODE.getKey(), "time_series") .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of("flattened_field.key1", "keyword_field")) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java index 42f28b081131..59ec920e891e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/BinaryDenseVectorScriptDocValuesTests.java @@ -31,7 +31,7 @@ public void testFloatGetVectorValueAndGetMagnitude() throws IOException { float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; float[] expectedMagnitudes = { 1.7320f, 2.4495f, 3.3166f }; - for (IndexVersion indexVersion : List.of(IndexVersion.V_7_4_0, IndexVersion.CURRENT)) { + for (IndexVersion indexVersion : List.of(IndexVersion.V_7_4_0, IndexVersion.current())) { BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, indexVersion); 
DenseVectorDocValuesField field = new BinaryDenseVectorDocValuesField(docValues, "test", ElementType.FLOAT, dims, indexVersion); DenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); @@ -50,7 +50,7 @@ public void testByteGetVectorValueAndGetMagnitude() throws IOException { float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; float[] expectedMagnitudes = { 1.7320f, 2.4495f, 3.3166f }; - BinaryDocValues docValues = wrap(vectors, ElementType.BYTE, IndexVersion.CURRENT); + BinaryDocValues docValues = wrap(vectors, ElementType.BYTE, IndexVersion.current()); DenseVectorDocValuesField field = new ByteBinaryDenseVectorDocValuesField(docValues, "test", ElementType.BYTE, dims); DenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); for (int i = 0; i < vectors.length; i++) { @@ -64,7 +64,7 @@ public void testByteGetVectorValueAndGetMagnitude() throws IOException { public void testFloatMetadataAndIterator() throws IOException { int dims = 3; - IndexVersion indexVersion = IndexVersion.CURRENT; + IndexVersion indexVersion = IndexVersion.current(); float[][] vectors = fill(new float[randomIntBetween(1, 5)][dims], ElementType.FLOAT); BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, indexVersion); DenseVectorDocValuesField field = new BinaryDenseVectorDocValuesField(docValues, "test", ElementType.FLOAT, dims, indexVersion); @@ -84,7 +84,7 @@ public void testFloatMetadataAndIterator() throws IOException { public void testByteMetadataAndIterator() throws IOException { int dims = 3; - IndexVersion indexVersion = IndexVersion.CURRENT; + IndexVersion indexVersion = IndexVersion.current(); float[][] vectors = fill(new float[randomIntBetween(1, 5)][dims], ElementType.BYTE); BinaryDocValues docValues = wrap(vectors, ElementType.BYTE, indexVersion); DenseVectorDocValuesField field = new ByteBinaryDenseVectorDocValuesField(docValues, "test", ElementType.BYTE, dims); @@ -114,13 +114,13 @@ protected float[][] fill(float[][] vectors, 
ElementType elementType) { public void testFloatMissingValues() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; - BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, IndexVersion.CURRENT); + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, IndexVersion.current()); DenseVectorDocValuesField field = new BinaryDenseVectorDocValuesField( docValues, "test", ElementType.FLOAT, dims, - IndexVersion.CURRENT + IndexVersion.current() ); DenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); @@ -136,7 +136,7 @@ public void testFloatMissingValues() throws IOException { public void testByteMissingValues() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; - BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, IndexVersion.CURRENT); + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, IndexVersion.current()); DenseVectorDocValuesField field = new ByteBinaryDenseVectorDocValuesField(docValues, "test", ElementType.BYTE, dims); DenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); @@ -152,13 +152,13 @@ public void testByteMissingValues() throws IOException { public void testFloatGetFunctionIsNotAccessible() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; - BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, IndexVersion.CURRENT); + BinaryDocValues docValues = wrap(vectors, ElementType.FLOAT, IndexVersion.current()); DenseVectorDocValuesField field = new BinaryDenseVectorDocValuesField( docValues, "test", ElementType.FLOAT, dims, - IndexVersion.CURRENT + IndexVersion.current() ); DenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); @@ -175,7 +175,7 @@ public void testFloatGetFunctionIsNotAccessible() throws IOException { public void testByteGetFunctionIsNotAccessible() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, 
{ 1, 1, 2 }, { 1, 1, 3 } }; - BinaryDocValues docValues = wrap(vectors, ElementType.BYTE, IndexVersion.CURRENT); + BinaryDocValues docValues = wrap(vectors, ElementType.BYTE, IndexVersion.current()); DenseVectorDocValuesField field = new ByteBinaryDenseVectorDocValuesField(docValues, "test", ElementType.BYTE, dims); DenseVectorScriptDocValues scriptDocValues = field.toScriptDocValues(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java index 4b191d369dd1..a122ff4f559b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java @@ -241,8 +241,8 @@ public void testDefaults() throws Exception { assertThat(fields.get(0), instanceOf(BinaryDocValuesField.class)); // assert that after decoding the indexed value is equal to expected BytesRef vectorBR = fields.get(0).binaryValue(); - float[] decodedValues = decodeDenseVector(IndexVersion.CURRENT, vectorBR); - float decodedMagnitude = VectorEncoderDecoder.decodeMagnitude(IndexVersion.CURRENT, vectorBR); + float[] decodedValues = decodeDenseVector(IndexVersion.current(), vectorBR); + float decodedMagnitude = VectorEncoderDecoder.decodeMagnitude(IndexVersion.current(), vectorBR); assertEquals(expectedMagnitude, decodedMagnitude, 0.001f); assertArrayEquals("Decoded dense vector values is not equal to the indexed one.", validVector, decodedValues, 0.001f); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java index a9af336c4a3e..d22056d49beb 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java @@ -36,7 +36,7 @@ public DenseVectorFieldTypeTests() { private DenseVectorFieldType createFloatFieldType() { return new DenseVectorFieldType( "f", - IndexVersion.CURRENT, + IndexVersion.current(), DenseVectorFieldMapper.ElementType.FLOAT, 5, indexed, @@ -48,7 +48,7 @@ private DenseVectorFieldType createFloatFieldType() { private DenseVectorFieldType createByteFieldType() { return new DenseVectorFieldType( "f", - IndexVersion.CURRENT, + IndexVersion.current(), DenseVectorFieldMapper.ElementType.BYTE, 5, true, @@ -113,7 +113,7 @@ public void testFetchSourceValue() throws IOException { public void testFloatCreateKnnQuery() { DenseVectorFieldType unindexedField = new DenseVectorFieldType( "f", - IndexVersion.CURRENT, + IndexVersion.current(), DenseVectorFieldMapper.ElementType.FLOAT, 3, false, @@ -128,7 +128,7 @@ public void testFloatCreateKnnQuery() { DenseVectorFieldType dotProductField = new DenseVectorFieldType( "f", - IndexVersion.CURRENT, + IndexVersion.current(), DenseVectorFieldMapper.ElementType.FLOAT, 3, true, @@ -143,7 +143,7 @@ public void testFloatCreateKnnQuery() { DenseVectorFieldType cosineField = new DenseVectorFieldType( "f", - IndexVersion.CURRENT, + IndexVersion.current(), DenseVectorFieldMapper.ElementType.FLOAT, 3, true, @@ -161,7 +161,7 @@ public void testCreateKnnQueryMaxDims() { { // float type with 2048 dims DenseVectorFieldType fieldWith2048dims = new DenseVectorFieldType( "f", - IndexVersion.CURRENT, + IndexVersion.current(), DenseVectorFieldMapper.ElementType.FLOAT, 2048, true, @@ -179,7 +179,7 @@ public void testCreateKnnQueryMaxDims() { { // byte type with 2048 dims DenseVectorFieldType fieldWith2048dims = new DenseVectorFieldType( "f", - IndexVersion.CURRENT, + IndexVersion.current(), DenseVectorFieldMapper.ElementType.BYTE, 2048, true, @@ -198,7 +198,7 @@ public void testCreateKnnQueryMaxDims() { public void testByteCreateKnnQuery() { 
DenseVectorFieldType unindexedField = new DenseVectorFieldType( "f", - IndexVersion.CURRENT, + IndexVersion.current(), DenseVectorFieldMapper.ElementType.BYTE, 3, false, @@ -213,7 +213,7 @@ public void testByteCreateKnnQuery() { DenseVectorFieldType cosineField = new DenseVectorFieldType( "f", - IndexVersion.CURRENT, + IndexVersion.current(), DenseVectorFieldMapper.ElementType.BYTE, 3, true, diff --git a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java index efc37610de8c..6ddbbf25c987 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java @@ -24,6 +24,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.sameInstance; public class ExistsQueryBuilderTests extends AbstractQueryTestCase { @Override @@ -121,4 +122,20 @@ public void testFromJson() throws IOException { assertEquals(json, 42.0, parsed.boost(), 0.0001); assertEquals(json, "user", parsed.fieldName()); } + + public void testRewriteIndexQueryToMatchNone() throws IOException { + ExistsQueryBuilder query = QueryBuilders.existsQuery("does_not_exist"); + for (QueryRewriteContext context : new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }) { + QueryBuilder rewritten = query.rewrite(context); + assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + } + } + + public void testRewriteIndexQueryToNotMatchNone() throws IOException { + ExistsQueryBuilder query = QueryBuilders.existsQuery(KEYWORD_FIELD_NAME); + for (QueryRewriteContext context : new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }) { + QueryBuilder rewritten = query.rewrite(context); + assertThat(rewritten, sameInstance(query)); + } + } } diff 
--git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index 954f0881fca0..278d4ae505bd 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -604,55 +604,67 @@ public void testCachingStrategiesWithNow() throws IOException { public void testRewriteToTermQueries() throws IOException { MatchQueryBuilder queryBuilder = new MatchQueryBuilder(KEYWORD_FIELD_NAME, "value"); queryBuilder.boost(2f); - SearchExecutionContext context = createSearchExecutionContext(); - QueryBuilder rewritten = queryBuilder.rewrite(context); - assertThat(rewritten, instanceOf(TermQueryBuilder.class)); - TermQueryBuilder tqb = (TermQueryBuilder) rewritten; - assertEquals(KEYWORD_FIELD_NAME, tqb.fieldName); - assertEquals(new BytesRef("value"), tqb.value); - assertThat(rewritten.boost(), equalTo(2f)); + QueryRewriteContext[] contexts = new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }; + for (QueryRewriteContext context : contexts) { + QueryBuilder rewritten = queryBuilder.rewrite(context); + assertThat(rewritten, instanceOf(TermQueryBuilder.class)); + TermQueryBuilder tqb = (TermQueryBuilder) rewritten; + assertEquals(KEYWORD_FIELD_NAME, tqb.fieldName); + assertEquals(new BytesRef("value"), tqb.value); + assertThat(rewritten.boost(), equalTo(2f)); + } } public void testRewriteToTermQueryWithAnalyzer() throws IOException { MatchQueryBuilder queryBuilder = new MatchQueryBuilder(TEXT_FIELD_NAME, "value"); queryBuilder.analyzer("keyword"); - SearchExecutionContext context = createSearchExecutionContext(); - QueryBuilder rewritten = queryBuilder.rewrite(context); - assertThat(rewritten, instanceOf(TermQueryBuilder.class)); - TermQueryBuilder tqb = (TermQueryBuilder) rewritten; - assertEquals(TEXT_FIELD_NAME, tqb.fieldName); - 
assertEquals(new BytesRef("value"), tqb.value); + QueryRewriteContext[] contexts = new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }; + for (QueryRewriteContext context : contexts) { + QueryBuilder rewritten = queryBuilder.rewrite(context); + assertThat(rewritten, instanceOf(TermQueryBuilder.class)); + TermQueryBuilder tqb = (TermQueryBuilder) rewritten; + assertEquals(TEXT_FIELD_NAME, tqb.fieldName); + assertEquals(new BytesRef("value"), tqb.value); + } } public void testRewriteWithFuzziness() throws IOException { // If we've configured fuzziness then we can't rewrite to a term query MatchQueryBuilder queryBuilder = new MatchQueryBuilder(KEYWORD_FIELD_NAME, "value"); queryBuilder.fuzziness(Fuzziness.AUTO); - SearchExecutionContext context = createSearchExecutionContext(); - QueryBuilder rewritten = queryBuilder.rewrite(context); - assertEquals(queryBuilder, rewritten); + QueryRewriteContext[] contexts = new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }; + for (QueryRewriteContext context : contexts) { + QueryBuilder rewritten = queryBuilder.rewrite(context); + assertEquals(queryBuilder, rewritten); + } } public void testRewriteWithLeniency() throws IOException { // If we've configured leniency then we can't rewrite to a term query MatchQueryBuilder queryBuilder = new MatchQueryBuilder(KEYWORD_FIELD_NAME, "value"); queryBuilder.lenient(true); - SearchExecutionContext context = createSearchExecutionContext(); - QueryBuilder rewritten = queryBuilder.rewrite(context); - assertEquals(queryBuilder, rewritten); + QueryRewriteContext[] contexts = new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }; + for (QueryRewriteContext context : contexts) { + QueryBuilder rewritten = queryBuilder.rewrite(context); + assertEquals(queryBuilder, rewritten); + } } public void testRewriteIndexQueryToMatchNone() throws IOException { QueryBuilder query = new 
MatchQueryBuilder("_index", "does_not_exist"); - SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - QueryBuilder rewritten = query.rewrite(searchExecutionContext); - assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + QueryRewriteContext[] contexts = new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }; + for (QueryRewriteContext context : contexts) { + QueryBuilder rewritten = query.rewrite(context); + assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + } } public void testRewriteIndexQueryToNotMatchNone() throws IOException { QueryBuilder query = new MatchQueryBuilder("_index", getIndex().getName()); - SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - QueryBuilder rewritten = query.rewrite(searchExecutionContext); - assertThat(rewritten, instanceOf(MatchAllQueryBuilder.class)); + QueryRewriteContext[] contexts = new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }; + for (QueryRewriteContext context : contexts) { + QueryBuilder rewritten = query.rewrite(context); + assertThat(rewritten, instanceOf(MatchAllQueryBuilder.class)); + } } } diff --git a/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java index 676f5dda96ff..dc658272927a 100644 --- a/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/PrefixQueryBuilderTests.java @@ -152,16 +152,18 @@ public void testParseFailsWithMultipleFields() throws IOException { public void testRewriteIndexQueryToMatchNone() throws Exception { PrefixQueryBuilder query = prefixQuery("_index", "does_not_exist"); - SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - QueryBuilder rewritten = query.rewrite(searchExecutionContext); - 
assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + for (QueryRewriteContext context : new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }) { + QueryBuilder rewritten = query.rewrite(context); + assertThat(rewritten, instanceOf(MatchNoneQueryBuilder.class)); + } } public void testRewriteIndexQueryToNotMatchNone() throws Exception { PrefixQueryBuilder query = prefixQuery("_index", getIndex().getName()); - SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - QueryBuilder rewritten = query.rewrite(searchExecutionContext); - assertThat(rewritten, instanceOf(MatchAllQueryBuilder.class)); + for (QueryRewriteContext context : new QueryRewriteContext[] { createSearchExecutionContext(), createQueryRewriteContext() }) { + QueryBuilder rewritten = query.rewrite(context); + assertThat(rewritten, instanceOf(MatchAllQueryBuilder.class)); + } } @Override diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java index 68f6be4d6467..7041f6db7f29 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -13,8 +13,10 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; @@ -30,6 +32,7 @@ import 
org.elasticsearch.index.shard.ShardNotInPrimaryModeException; import org.elasticsearch.indices.EmptySystemIndices; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; @@ -37,6 +40,7 @@ import org.elasticsearch.transport.TransportService; import java.util.Collections; +import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.index.seqno.RetentionLeaseSyncAction.getExceptionLogLevel; @@ -192,21 +196,17 @@ public void testExceptionLogLevel() { assertEquals(Level.WARN, getExceptionLogLevel(new RuntimeException("simulated"))); assertEquals(Level.WARN, getExceptionLogLevel(new RuntimeException("simulated", new RuntimeException("simulated")))); - assertEquals(Level.DEBUG, getExceptionLogLevel(new IndexNotFoundException("index"))); - assertEquals(Level.DEBUG, getExceptionLogLevel(new RuntimeException("simulated", new IndexNotFoundException("index")))); - - assertEquals(Level.DEBUG, getExceptionLogLevel(new AlreadyClosedException("index"))); - assertEquals(Level.DEBUG, getExceptionLogLevel(new RuntimeException("simulated", new AlreadyClosedException("index")))); - final var shardId = new ShardId("test", "_na_", 0); - - assertEquals(Level.DEBUG, getExceptionLogLevel(new IndexShardClosedException(shardId))); - assertEquals(Level.DEBUG, getExceptionLogLevel(new RuntimeException("simulated", new IndexShardClosedException(shardId)))); - - assertEquals(Level.DEBUG, getExceptionLogLevel(new ShardNotInPrimaryModeException(shardId, IndexShardState.CLOSED))); - assertEquals( - Level.DEBUG, - getExceptionLogLevel(new RuntimeException("simulated", new ShardNotInPrimaryModeException(shardId, IndexShardState.CLOSED))) - ); + for (final var exception : List.of( + new NodeClosedException(DiscoveryNodeUtils.create("node")), + new 
IndexNotFoundException(shardId.getIndexName()), + new AlreadyClosedException(shardId.getIndexName()), + new IndexShardClosedException(shardId), + new ShardNotInPrimaryModeException(shardId, IndexShardState.CLOSED), + new ReplicationOperation.RetryOnPrimaryException(shardId, "test") + )) { + assertEquals(Level.DEBUG, getExceptionLogLevel(exception)); + assertEquals(Level.DEBUG, getExceptionLogLevel(new RuntimeException("wrapper", exception))); + } } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index ff2a88ae16fe..f5b8e78c1611 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -70,6 +70,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.DocIdSeqNoAndSource; @@ -2612,7 +2613,7 @@ public void testRestoreShard() throws IOException { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ) ); @@ -3817,12 +3818,12 @@ public void testScheduledRefresh() throws Exception { assertBusy(() -> assertThat(primary.getThreadPool().relativeTimeInMillis(), greaterThan(lastSearchAccess))); // Make shard search active again and ensure previously index document is visible: - CountDownLatch latch = new CountDownLatch(1); - primary.ensureShardSearchActive(refreshed -> { - assertTrue(refreshed); - latch.countDown(); + long refreshesBefore = primary.refreshStats().getTotal(); + primary.ensureShardSearchActive(registered -> { assertTrue(registered); }); + assertBusy(() -> { 
+ assertFalse(primary.hasRefreshPending()); + assertThat(primary.refreshStats().getTotal(), equalTo(refreshesBefore + 1)); }); - latch.await(); assertNotEquals( "awaitShardSearchActive must access a searcher to remove search idle state", lastSearchAccess, @@ -3833,19 +3834,19 @@ public void testScheduledRefresh() throws Exception { assertEquals(2, searcher.getIndexReader().numDocs()); } - // No documents were added and shard is search active so makeShardSearchActive(...) should behave like a noop: + // No documents were added and shard is search active so ensureShardSearchActive(...) should behave like a noop: assertFalse(primary.getEngine().refreshNeeded()); - CountDownLatch latch1 = new CountDownLatch(1); - primary.ensureShardSearchActive(refreshed -> { - assertFalse(refreshed); + CountDownLatch latch = new CountDownLatch(1); + primary.ensureShardSearchActive(registered -> { + assertFalse(registered); try (Engine.Searcher searcher = primary.acquireSearcher("test")) { assertEquals(2, searcher.getIndexReader().numDocs()); } finally { - latch1.countDown(); + latch.countDown(); } }); - latch1.await(); + latch.await(); // Index a document while shard is search active and ensure scheduleRefresh(...) makes documen visible: indexDoc(primary, "_doc", "2", "{\"foo\" : \"bar\"}"); diff --git a/server/src/test/java/org/elasticsearch/index/translog/BufferedChecksumStreamInputTests.java b/server/src/test/java/org/elasticsearch/index/translog/BufferedChecksumStreamInputTests.java new file mode 100644 index 000000000000..1fd3bac12f21 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/translog/BufferedChecksumStreamInputTests.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.translog; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.AbstractStreamTests; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.util.zip.CRC32; + +public class BufferedChecksumStreamInputTests extends AbstractStreamTests { + + @Override + protected StreamInput getStreamInput(BytesReference bytesReference) { + return new BufferedChecksumStreamInput(StreamInput.wrap(BytesReference.toBytes(bytesReference)), "test"); + } + + public void testChecksum() throws IOException { + int bytesSize = randomIntBetween(512, 2048); + byte[] bytes = randomByteArrayOfLength(bytesSize); + CRC32 crc32 = new CRC32(); + crc32.update(bytes); + + try (BufferedChecksumStreamInput input = new BufferedChecksumStreamInput(StreamInput.wrap(bytes), "test")) { + int read = input.read(new byte[bytesSize]); + assertEquals(bytesSize, read); + assertEquals(-1, input.read()); + assertEquals(crc32.getValue(), input.getChecksum()); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index 8aab1107b1a4..5d3c4e98f862 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -91,9 +91,9 @@ public Map getMetadataMappers() { public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersion.V_8_0_0, IndexVersion.CURRENT); + IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), 
IndexVersion.V_8_0_0, IndexVersion.current()); assertThat( - module.getMapperRegistry().getMapperParser("object", IndexVersion.CURRENT), + module.getMapperRegistry().getMapperParser("object", IndexVersion.current()), instanceOf(ObjectMapper.TypeParser.class) ); assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); @@ -119,14 +119,14 @@ public void testBuiltinWithPlugins() { IndicesModule noPluginsModule = new IndicesModule(Collections.emptyList()); IndicesModule module = new IndicesModule(fakePlugins); MapperRegistry registry = module.getMapperRegistry(); - assertThat(registry.getMapperParser("fake-mapper", IndexVersion.CURRENT), instanceOf(FakeMapperParser.class)); - assertNull(noPluginsModule.getMapperRegistry().getMapperParser("fake-mapper", IndexVersion.CURRENT)); + assertThat(registry.getMapperParser("fake-mapper", IndexVersion.current()), instanceOf(FakeMapperParser.class)); + assertNull(noPluginsModule.getMapperRegistry().getMapperParser("fake-mapper", IndexVersion.current())); assertThat( - registry.getMetadataMapperParsers(IndexVersion.CURRENT).size(), - greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(IndexVersion.CURRENT).size()) + registry.getMetadataMapperParsers(IndexVersion.current()).size(), + greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(IndexVersion.current()).size()) ); Map metadataMapperParsers = module.getMapperRegistry() - .getMetadataMapperParsers(IndexVersion.CURRENT); + .getMetadataMapperParsers(IndexVersion.current()); Iterator iterator = metadataMapperParsers.keySet().iterator(); assertEquals(IgnoredFieldMapper.NAME, iterator.next()); String last = null; diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index bd72741a65a0..090cf306b3c3 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ 
b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -10,7 +10,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -85,6 +84,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettingProviders; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.ShardLongFieldRange; @@ -241,7 +241,7 @@ public Transport.Connection getConnection(DiscoveryNode node) { ) { // metadata upgrader should do nothing @Override - public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, Version minimumIndexCompatibilityVersion) { + public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, IndexVersion minimumIndexCompatibilityVersion) { return indexMetadata; } }; diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 095cd38288a1..277d0472d738 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -532,7 +532,9 @@ private IndicesClusterStateService createIndicesClusterStateService( threadPool, List.of() ); + final NodeClient client = mock(NodeClient.class); final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService( + client, threadPool, transportService, null, @@ -541,7 +543,6 @@ private IndicesClusterStateService 
createIndicesClusterStateService( ); final ShardStateAction shardStateAction = mock(ShardStateAction.class); final PrimaryReplicaSyncer primaryReplicaSyncer = mock(PrimaryReplicaSyncer.class); - final NodeClient client = mock(NodeClient.class); return new IndicesClusterStateService( settings, indicesService, diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index 665ac7b5d576..c2c6671ee887 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -444,7 +444,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { List components = new ArrayList<>(); components.add(new PluginComponentBinding<>(MyInterface.class, getRandomBool() ? 
new Foo() : new Bar())); diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java index c7bcbe18f003..a6142499bc89 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.recovery.plan.RecoveryPlannerService; @@ -271,7 +272,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { return null; } diff --git a/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java b/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java index 6359b9425c93..e3cd11c8f3b6 100644 --- a/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/UberModuleClassLoaderTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.plugins; -import org.apache.lucene.util.Constants; import org.elasticsearch.common.Strings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.compiler.InMemoryJavaCompiler; @@ -16,6 +15,7 @@ import java.io.IOException; import java.lang.module.Configuration; +import java.lang.module.ModuleDescriptor; import java.lang.module.ModuleFinder; import java.net.MalformedURLException; import java.net.URL; @@ -40,6 +40,8 @@ 
@ESTestCase.WithoutSecurityManager public class UberModuleClassLoaderTests extends ESTestCase { + private static Set loaders = new HashSet<>(); + /** * Test the loadClass method, which is the real entrypoint for users of the classloader */ @@ -466,51 +468,116 @@ public static String demo() { JarUtils.createJarWithEntries(jar, jarEntries); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/91609") public void testServiceLoadingWithOptionalDependencies() throws Exception { - assumeFalse("Tests frequently fail on Windows", Constants.WINDOWS); try (UberModuleClassLoader loader = getServiceTestLoader(true)) { + // check module descriptor + ModuleDescriptor synthetic = loader.getLayer().findModule("synthetic").orElseThrow().getDescriptor(); + + assertThat( + synthetic.uses(), + equalTo( + Set.of("p.required.LetterService", "p.optional.AnimalService", "q.jar.one.NumberService", "q.jar.two.FooBarService") + ) + ); + // the descriptor model uses a list ordering that we don't guarantee, so we convert the provider list to maps and sets + Map> serviceProviders = synthetic.provides() + .stream() + .collect(Collectors.toMap(ModuleDescriptor.Provides::service, provides -> new HashSet<>(provides.providers()))); + assertThat( + serviceProviders, + equalTo( + Map.of( + "p.required.LetterService", + Set.of("q.jar.one.JarOneProvider", "q.jar.two.JarTwoProvider"), + // optional dependencies found and added + "p.optional.AnimalService", + Set.of("q.jar.one.JarOneOptionalProvider", "q.jar.two.JarTwoOptionalProvider"), + "q.jar.one.NumberService", + Set.of("q.jar.one.JarOneProvider", "q.jar.two.JarTwoProvider"), + "q.jar.two.FooBarService", + Set.of("q.jar.two.JarTwoProvider") + ) + ) + ); + + // Now let's make sure the module system lets us load available services Class serviceCallerClass = loader.loadClass("q.caller.ServiceCaller"); Object instance = serviceCallerClass.getConstructor().newInstance(); var requiredParent = 
serviceCallerClass.getMethod("callServiceFromRequiredParent"); assertThat(requiredParent.invoke(instance), equalTo("AB")); var optionalParent = serviceCallerClass.getMethod("callServiceFromOptionalParent"); - assertThat(optionalParent.invoke(instance), equalTo("catdog")); + assertThat(optionalParent.invoke(instance), equalTo("catdog")); // our service provider worked var modular = serviceCallerClass.getMethod("callServiceFromModularJar"); assertThat(modular.invoke(instance), equalTo("12")); var nonModular = serviceCallerClass.getMethod("callServiceFromNonModularJar"); assertThat(nonModular.invoke(instance), equalTo("foo")); + } finally { + for (URLClassLoader loader : loaders) { + loader.close(); + } + loaders = new HashSet<>(); } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/91609") public void testServiceLoadingWithoutOptionalDependencies() throws Exception { - assumeFalse("Tests frequently fail on Windows", Constants.WINDOWS); try (UberModuleClassLoader loader = getServiceTestLoader(false)) { + // check module descriptor + ModuleDescriptor synthetic = loader.getLayer().findModule("synthetic").orElseThrow().getDescriptor(); + assertThat(synthetic.uses(), equalTo(Set.of("p.required.LetterService", "q.jar.one.NumberService", "q.jar.two.FooBarService"))); + // the descriptor model uses a list ordering that we don't guarantee, so we convert the provider list to maps and sets + Map> serviceProviders = synthetic.provides() + .stream() + .collect(Collectors.toMap(ModuleDescriptor.Provides::service, provides -> new HashSet<>(provides.providers()))); + assertThat( + serviceProviders, + equalTo( + Map.of( + "p.required.LetterService", + Set.of("q.jar.one.JarOneProvider", "q.jar.two.JarTwoProvider"), + "q.jar.one.NumberService", + Set.of("q.jar.one.JarOneProvider", "q.jar.two.JarTwoProvider"), + "q.jar.two.FooBarService", + Set.of("q.jar.two.JarTwoProvider") + ) + ) + ); + + // Now let's make sure the module system lets us load available services 
Class serviceCallerClass = loader.loadClass("q.caller.ServiceCaller"); Object instance = serviceCallerClass.getConstructor().newInstance(); var requiredParent = serviceCallerClass.getMethod("callServiceFromRequiredParent"); assertThat(requiredParent.invoke(instance), equalTo("AB")); var optionalParent = serviceCallerClass.getMethod("callServiceFromOptionalParent"); + // service not found at runtime, so we don't try to load the provider assertThat(optionalParent.invoke(instance), equalTo("Optional AnimalService dependency not present at runtime.")); var modular = serviceCallerClass.getMethod("callServiceFromModularJar"); assertThat(modular.invoke(instance), equalTo("12")); var nonModular = serviceCallerClass.getMethod("callServiceFromNonModularJar"); assertThat(nonModular.invoke(instance), equalTo("foo")); + } finally { + for (URLClassLoader loader : loaders) { + loader.close(); + } + loaders = new HashSet<>(); } } - /** - * We need to create a test scenario that covers four service loading situations: + /* + * A class in our ubermodule may use SPI to load a service. Our test scenario needs to work out the following four + * conditions: + * * 1. Service defined in package exported in parent layer. * 2. Service defined in a compile-time dependency, optionally present at runtime. * 3. Service defined in modular jar in uberjar * 4. Service defined in non-modular jar in uberjar * + * In all these cases, our ubermodule should declare that it uses each service *available at runtime*, and that + * it provides these services with the correct providers. + * * We create a jar for each scenario, plus "service caller" jar with a demo class, then * create an UberModuleClassLoader for the relevant jars. 
*/ @@ -525,11 +592,18 @@ private static UberModuleClassLoader getServiceTestLoader(boolean includeOptiona .configuration() .resolve(parentModuleFinder, ModuleFinder.of(), moduleNames); - ModuleLayer parentLayer = ModuleLayer.defineModulesWithOneLoader( - parentLayerConfiguration, - List.of(ModuleLayer.boot()), - UberModuleClassLoaderTests.class.getClassLoader() - ).layer(); + URLClassLoader parentLoader = new URLClassLoader(new URL[] { pathToUrlUnchecked(parentJar) }); + loaders.add(parentLoader); + URLClassLoader optionalLoader = new URLClassLoader(new URL[] { pathToUrlUnchecked(optionalJar) }, parentLoader); + loaders.add(optionalLoader); + ModuleLayer parentLayer = ModuleLayer.defineModules(parentLayerConfiguration, List.of(ModuleLayer.boot()), (String moduleName) -> { + if (moduleName.equals("p.required")) { + return parentLoader; + } else if (includeOptionalDeps && moduleName.equals("p.optional")) { + return optionalLoader; + } + return null; + }).layer(); // jars for the ubermodule Path modularJar = createModularizedJarForBundle(libDir); @@ -538,7 +612,7 @@ private static UberModuleClassLoader getServiceTestLoader(boolean includeOptiona Set jarPaths = new HashSet<>(Set.of(modularJar, nonModularJar, serviceCallerJar)); return UberModuleClassLoader.getInstance( - parentLayer.findLoader("p.required"), + parentLayer.findLoader(includeOptionalDeps ? 
"p.optional" : "p.required"), parentLayer, "synthetic", jarPaths.stream().map(UberModuleClassLoaderTests::pathToUrlUnchecked).collect(Collectors.toSet()), diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java index 6c9f2193661e..d5bf27fbfc7c 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESTestCase; @@ -110,7 +111,7 @@ public void testAddSnapshots() { newSnapshot, new RepositoryData.SnapshotDetails( randomFrom(SnapshotState.SUCCESS, SnapshotState.PARTIAL, SnapshotState.FAILED), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), Version.CURRENT.minimumCompatibilityVersion().indexVersion), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) @@ -142,7 +143,7 @@ public void testInitIndices() { snapshotId.getUUID(), new RepositoryData.SnapshotDetails( randomFrom(SnapshotState.values()), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), Version.CURRENT.minimumCompatibilityVersion().indexVersion), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) @@ -210,7 +211,7 @@ public void testGetSnapshotState() { snapshotId, new RepositoryData.SnapshotDetails( state, - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), Version.CURRENT.minimumCompatibilityVersion().indexVersion), 
randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) @@ -392,7 +393,7 @@ public void testIndexMetaDataToRemoveAfterRemovingSnapshotWithSharing() { final RepositoryData.SnapshotDetails details = new RepositoryData.SnapshotDetails( SnapshotState.SUCCESS, - Version.CURRENT, + IndexVersion.current(), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) @@ -455,7 +456,7 @@ public static RepositoryData generateRandomRepoData() { snapshotId, new RepositoryData.SnapshotDetails( randomFrom(SnapshotState.values()), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + randomFrom(IndexVersion.current(), Version.CURRENT.minimumCompatibilityVersion().indexVersion), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 1b44b7576b39..be0f7fbea539 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; @@ -303,7 +304,7 @@ public void testRepositoryDataDetails() throws Exception { final Consumer snapshotDetailsAsserter = snapshotDetails -> { assertThat(snapshotDetails.getSnapshotState(), equalTo(SnapshotState.PARTIAL)); - assertThat(snapshotDetails.getVersion(), equalTo(Version.CURRENT)); + assertThat(snapshotDetails.getVersion(), equalTo(IndexVersion.current())); 
assertThat(snapshotDetails.getStartTimeMillis(), allOf(greaterThanOrEqualTo(beforeStartTime), lessThanOrEqualTo(afterEndTime))); assertThat( snapshotDetails.getEndTimeMillis(), @@ -327,7 +328,7 @@ public void testRepositoryDataDetails() throws Exception { repositoryData.withExtraDetails( Collections.singletonMap( snapshotId, - new RepositoryData.SnapshotDetails(SnapshotState.PARTIAL, Version.CURRENT, -1, -1, null) + new RepositoryData.SnapshotDetails(SnapshotState.PARTIAL, IndexVersion.current(), -1, -1, null) ) ), repositoryData.getGenId() @@ -383,7 +384,7 @@ private RepositoryData addRandomSnapshotsToRepoData(RepositoryData repoData, boo .collect(Collectors.toMap(Function.identity(), ind -> randomAlphaOfLength(256))); final RepositoryData.SnapshotDetails details = new RepositoryData.SnapshotDetails( randomFrom(SnapshotState.SUCCESS, SnapshotState.PARTIAL, SnapshotState.FAILED), - Version.CURRENT, + IndexVersion.current(), randomNonNegativeLong(), randomNonNegativeLong(), randomAlphaOfLength(10) diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/ShardSnapshotTaskRunnerTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/ShardSnapshotTaskRunnerTests.java index 304cc85876ec..a5e924eebb1c 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/ShardSnapshotTaskRunnerTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/ShardSnapshotTaskRunnerTests.java @@ -102,7 +102,7 @@ public int finishedShardSnapshotTasks() { public static BlobStoreIndexShardSnapshot.FileInfo dummyFileInfo() { String filename = randomAlphaOfLength(10); - StoreFileMetadata metadata = new StoreFileMetadata(filename, 10, "CHECKSUM", IndexVersion.CURRENT.luceneVersion().toString()); + StoreFileMetadata metadata = new StoreFileMetadata(filename, 10, "CHECKSUM", IndexVersion.current().luceneVersion().toString()); return new BlobStoreIndexShardSnapshot.FileInfo(filename, metadata, null); } diff --git 
a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java index c50413fede70..18dc565d77c0 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; @@ -125,7 +126,7 @@ public void testSnapshotAndRestore() throws IOException { ShardRouting routing = ShardRouting.newUnassigned( shardId, true, - new RecoverySource.SnapshotRecoverySource("test", new Snapshot("foo", snapshotId), Version.CURRENT, indexId), + new RecoverySource.SnapshotRecoverySource("test", new Snapshot("foo", snapshotId), IndexVersion.current(), indexId), new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, ""), ShardRouting.Role.DEFAULT ); diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index a679d2feab93..a4c5810d8824 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -59,6 +59,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER; import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER_VALUE; @@ -107,7 +108,7 @@ public void setup() { HttpServerTransport httpServerTransport = new 
TestHttpServerTransport(); client = new NoOpNodeClient(this.getTestName()); tracer = mock(Tracer.class); - restController = new RestController(null, client, circuitBreakerService, usageService, tracer, false); + restController = new RestController(null, client, circuitBreakerService, usageService, tracer); restController.registerHandler( new Route(GET, "/"), (request, channel, client) -> channel.sendResponse( @@ -129,7 +130,7 @@ public void teardown() throws IOException { public void testApplyProductSpecificResponseHeaders() { final ThreadContext threadContext = client.threadPool().getThreadContext(); - final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer, false); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); AssertingChannel channel = new AssertingChannel(fakeRequest, false, RestStatus.BAD_REQUEST); restController.dispatchRequest(fakeRequest, channel, threadContext); @@ -145,7 +146,7 @@ public void testRequestWithDisallowedMultiValuedHeader() { Set headers = new HashSet<>( Arrays.asList(new RestHeaderDefinition("header.1", true), new RestHeaderDefinition("header.2", false)) ); - final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer, false); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer); Map> restHeaders = new HashMap<>(); restHeaders.put("header.1", Collections.singletonList("boo")); restHeaders.put("header.2", List.of("foo", "bar")); @@ -160,7 +161,7 @@ public void testRequestWithDisallowedMultiValuedHeader() { */ public void testDispatchStartsTrace() { final ThreadContext threadContext = client.threadPool().getThreadContext(); - final RestController restController = new RestController(null, null, circuitBreakerService, 
usageService, tracer, false); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer); RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).build(); final RestController spyRestController = spy(restController); when(spyRestController.getAllHandlers(null, fakeRequest.rawPath())).thenReturn(new Iterator<>() { @@ -189,7 +190,7 @@ public void testRequestWithDisallowedMultiValuedHeaderButSameValues() { Set headers = new HashSet<>( Arrays.asList(new RestHeaderDefinition("header.1", true), new RestHeaderDefinition("header.2", false)) ); - final RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer, false); + final RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); Map> restHeaders = new HashMap<>(); restHeaders.put("header.1", Collections.singletonList("boo")); restHeaders.put("header.2", List.of("foo", "foo")); @@ -260,7 +261,7 @@ public void testRegisterAsReplacedHandler() { } public void testRegisterSecondMethodWithDifferentNamedWildcard() { - final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer, false); + final RestController restController = new RestController(null, null, circuitBreakerService, usageService, tracer); RestRequest.Method firstMethod = randomFrom(methodList); RestRequest.Method secondMethod = randomFrom(methodList.stream().filter(m -> m != firstMethod).toList()); @@ -287,7 +288,7 @@ public void testRestHandlerWrapper() throws Exception { final RestController restController = new RestController(h -> { assertSame(handler, h); return (RestRequest request, RestChannel channel, NodeClient client) -> wrapperCalled.set(true); - }, client, circuitBreakerService, usageService, tracer, false); + }, client, circuitBreakerService, usageService, tracer); restController.registerHandler(new Route(GET, "/wrapped"), 
handler); RestRequest request = testRestRequest("/wrapped", "{}", XContentType.JSON); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST); @@ -374,7 +375,7 @@ public void testDispatchRequiresContentTypeForRequestsWithContent() { String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead())); RestRequest request = testRestRequest("/", content, null); AssertingChannel channel = new AssertingChannel(request, true, RestStatus.NOT_ACCEPTABLE); - restController = new RestController(null, null, circuitBreakerService, usageService, tracer, false); + restController = new RestController(null, null, circuitBreakerService, usageService, tracer); restController.registerHandler( new Route(GET, "/"), (r, c, client) -> c.sendResponse(new RestResponse(RestStatus.OK, RestResponse.TEXT_CONTENT_TYPE, BytesArray.EMPTY)) @@ -761,7 +762,7 @@ public Method method() { public void testDispatchCompatibleHandler() { - RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer, false); + RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); final RestApiVersion version = RestApiVersion.minimumSupported(); @@ -785,7 +786,7 @@ public void testDispatchCompatibleHandler() { public void testDispatchCompatibleRequestToNewlyAddedHandler() { - RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer, false); + RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); final RestApiVersion version = RestApiVersion.minimumSupported(); @@ -820,7 +821,7 @@ private FakeRestRequest requestWithContent(String mediaType) { } public void testCurrentVersionVNDMediaTypeIsNotUsingCompatibility() { - RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer, false); 
+ RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); final RestApiVersion version = RestApiVersion.current(); @@ -845,7 +846,7 @@ public void testCurrentVersionVNDMediaTypeIsNotUsingCompatibility() { } public void testCustomMediaTypeValidation() { - RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer, false); + RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); final String mediaType = "application/x-protobuf"; FakeRestRequest fakeRestRequest = requestWithContent(mediaType); @@ -871,7 +872,7 @@ public void handleRequest(RestRequest request, RestChannel channel, NodeClient c } public void testBrowserSafelistedContentTypesAreRejected() { - RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer, false); + RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); final String mediaType = randomFrom(RestController.SAFELISTED_MEDIA_TYPES); FakeRestRequest fakeRestRequest = requestWithContent(mediaType); @@ -892,7 +893,7 @@ public void handleRequest(RestRequest request, RestChannel channel, NodeClient c } public void testRegisterWithReservedPath() { - final RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer, false); + final RestController restController = new RestController(null, client, circuitBreakerService, usageService, tracer); for (String path : RestController.RESERVED_PATHS) { IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> { restController.registerHandler( @@ -910,7 +911,7 @@ public void testRegisterWithReservedPath() { * Test that when serverless is disabled, all endpoints are available regardless of ServerlessScope annotations. 
*/ public void testApiProtectionWithServerlessDisabled() { - final RestController restController = new RestController(null, client, circuitBreakerService, new UsageService(), tracer, false); + final RestController restController = new RestController(null, client, circuitBreakerService, new UsageService(), tracer); restController.registerHandler(new PublicRestHandler()); restController.registerHandler(new InternalRestHandler()); restController.registerHandler(new HiddenRestHandler()); @@ -926,22 +927,38 @@ public void testApiProtectionWithServerlessDisabled() { * Test that when serverless is enabled, a normal user can not access endpoints without a ServerlessScope annotation. */ public void testApiProtectionWithServerlessEnabledAsEndUser() { - final RestController restController = new RestController(null, client, circuitBreakerService, new UsageService(), tracer, true); + final RestController restController = new RestController(null, client, circuitBreakerService, new UsageService(), tracer); restController.registerHandler(new PublicRestHandler()); restController.registerHandler(new InternalRestHandler()); restController.registerHandler(new HiddenRestHandler()); - List accessiblePaths = List.of("/public", "/internal"); - accessiblePaths.forEach(path -> { + + final Consumer> checkUnprotected = paths -> paths.forEach(path -> { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath(path).build(); AssertingChannel channel = new AssertingChannel(request, false, RestStatus.OK); restController.dispatchRequest(request, channel, new ThreadContext(Settings.EMPTY)); }); - List inaccessiblePaths = List.of("/hidden"); - inaccessiblePaths.forEach(path -> { + final Consumer> checkProtected = paths -> paths.forEach(path -> { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath(path).build(); AssertingChannel channel = new AssertingChannel(request, false, RestStatus.NOT_FOUND); restController.dispatchRequest(request, channel, new 
ThreadContext(Settings.EMPTY)); }); + + List accessiblePaths = List.of("/public", "/internal"); + List inaccessiblePaths = List.of("/hidden"); + + // API protections are disabled by default + checkUnprotected.accept(accessiblePaths); + checkUnprotected.accept(inaccessiblePaths); + + // API protections can be dynamically enabled + restController.getApiProtections().setEnabled(true); + checkUnprotected.accept(accessiblePaths); + checkProtected.accept(inaccessiblePaths); + + // API protections can be dynamically disabled + restController.getApiProtections().setEnabled(false); + checkUnprotected.accept(accessiblePaths); + checkUnprotected.accept(inaccessiblePaths); } @ServerlessScope(Scope.PUBLIC) diff --git a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java index 56f75e9f468c..7bdb4f712028 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java @@ -78,7 +78,7 @@ public void testUnsupportedMethodResponseHttpHeader() throws Exception { ); UsageService usageService = new UsageService(); - RestController restController = new RestController(null, null, circuitBreakerService, usageService, Tracer.NOOP, false); + RestController restController = new RestController(null, null, circuitBreakerService, usageService, Tracer.NOOP); // A basic RestHandler handles requests to the endpoint RestHandler restHandler = (request, channel, client) -> channel.sendResponse(new RestResponse(RestStatus.OK, "")); diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java index e3428a0852dc..7ac44cf862e4 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java +++ 
b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java @@ -54,7 +54,7 @@ public class RestValidateQueryActionTests extends AbstractSearchTestCase { private NodeClient client = new NodeClient(Settings.EMPTY, threadPool); private UsageService usageService = new UsageService(); - private RestController controller = new RestController(null, client, new NoneCircuitBreakerService(), usageService, Tracer.NOOP, false); + private RestController controller = new RestController(null, client, new NoneCircuitBreakerService(), usageService, Tracer.NOOP); private RestValidateQueryAction action = new RestValidateQueryAction(); /** diff --git a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java index a094109ef028..e57b5039eb11 100644 --- a/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/script/VectorScoreScriptUtilsTests.java @@ -49,11 +49,11 @@ public void testFloatVectorClassBindings() throws IOException { IndexVersion.V_7_4_0 ), new BinaryDenseVectorDocValuesField( - BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersion.CURRENT), + BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersion.current()), "test", ElementType.FLOAT, dims, - IndexVersion.CURRENT + IndexVersion.current() ), new KnnDenseVectorDocValuesField(KnnDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }), "test", dims) ); @@ -130,7 +130,7 @@ public void testByteVectorClassBindings() throws IOException { List fields = List.of( new ByteBinaryDenseVectorDocValuesField( - BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.BYTE, IndexVersion.CURRENT), + BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.BYTE, 
IndexVersion.current()), "test", ElementType.BYTE, dims @@ -217,15 +217,15 @@ public void testByteVsFloatSimilarity() throws IOException { IndexVersion.V_7_4_0 ), new BinaryDenseVectorDocValuesField( - BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersion.CURRENT), + BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.FLOAT, IndexVersion.current()), "field1", ElementType.FLOAT, dims, - IndexVersion.CURRENT + IndexVersion.current() ), new KnnDenseVectorDocValuesField(KnnDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }), "field2", dims), new ByteBinaryDenseVectorDocValuesField( - BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.BYTE, IndexVersion.CURRENT), + BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.BYTE, IndexVersion.current()), "field3", ElementType.BYTE, dims @@ -363,7 +363,7 @@ public void testByteBoundaries() throws IOException { List fields = List.of( new ByteBinaryDenseVectorDocValuesField( - BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.BYTE, IndexVersion.CURRENT), + BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, ElementType.BYTE, IndexVersion.current()), "test", ElementType.BYTE, dims diff --git a/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java b/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java index 2f7a9144e6b5..a21f3d3c7e0d 100644 --- a/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java +++ b/server/src/test/java/org/elasticsearch/script/field/vectors/DenseVectorTests.java @@ -67,7 +67,7 @@ public void testFloatVsListQueryVector() { assertEquals(knn.cosineSimilarity(arrayQV), knn.cosineSimilarity(listQV), 0.001f); assertEquals(knn.cosineSimilarity((Object) listQV), knn.cosineSimilarity((Object) arrayQV), 0.001f); - for 
(IndexVersion indexVersion : List.of(IndexVersion.V_7_4_0, IndexVersion.CURRENT)) { + for (IndexVersion indexVersion : List.of(IndexVersion.V_7_4_0, IndexVersion.current())) { BytesRef value = BinaryDenseVectorScriptDocValuesTests.mockEncodeDenseVector(docVector, ElementType.FLOAT, indexVersion); BinaryDenseVector bdv = new BinaryDenseVector(docVector, value, dims, indexVersion); @@ -113,7 +113,7 @@ public void testByteVsListQueryVector() { assertEquals(knn.cosineSimilarity(arrayQV), knn.cosineSimilarity(listQV), 0.001f); assertEquals(knn.cosineSimilarity((Object) listQV), knn.cosineSimilarity((Object) arrayQV), 0.001f); - BytesRef value = BinaryDenseVectorScriptDocValuesTests.mockEncodeDenseVector(floatVector, ElementType.BYTE, IndexVersion.CURRENT); + BytesRef value = BinaryDenseVectorScriptDocValuesTests.mockEncodeDenseVector(floatVector, ElementType.BYTE, IndexVersion.current()); byte[] byteVectorValue = new byte[dims]; System.arraycopy(value.bytes, value.offset, byteVectorValue, 0, dims); ByteBinaryDenseVector bdv = new ByteBinaryDenseVector(byteVectorValue, value, dims); @@ -220,7 +220,7 @@ public void testFloatUnsupported() { e = expectThrows(UnsupportedOperationException.class, () -> knn.cosineSimilarity((Object) queryVector)); assertEquals(e.getMessage(), "use [double cosineSimilarity(float[] queryVector, boolean normalizeQueryVector)] instead"); - BinaryDenseVector binary = new BinaryDenseVector(docVector, new BytesRef(docBuffer.array()), dims, IndexVersion.CURRENT); + BinaryDenseVector binary = new BinaryDenseVector(docVector, new BytesRef(docBuffer.array()), dims, IndexVersion.current()); e = expectThrows(UnsupportedOperationException.class, () -> binary.dotProduct(queryVector)); assertEquals(e.getMessage(), "use [double dotProduct(float[] queryVector)] instead"); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 521fcd91ffd4..98ed02bdd698 
100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -11,9 +11,12 @@ import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.LeafReader; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.index.IndexResponse; @@ -39,6 +42,7 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.TimeValue; @@ -48,6 +52,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -77,6 +82,7 @@ import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchRequest; import org.elasticsearch.search.fetch.subphase.FieldAndFormat; @@ -85,6 +91,7 @@ import 
org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.tasks.TaskCancelHelper; @@ -112,6 +119,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -1861,6 +1869,47 @@ public void testMinimalSearchSourceInShardRequests() { } } + public void testDfsQueryPhaseRewrite() { + createIndex("index"); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + searchRequest.source(SearchSourceBuilder.searchSource().query(new TestRewriteCounterQueryBuilder())); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + -1, + null + ); + PlainActionFuture plainActionFuture = new PlainActionFuture<>(); + final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); + ReaderContext context = service.createAndPutReaderContext( + request, + indexService, + indexShard, + reader, + SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY).millis() + ); + service.executeQueryPhase( + new QuerySearchRequest(null, 
context.id(), request, new AggregatedDfs(Map.of(), Map.of(), 10)), + new SearchShardTask(42L, "", "", "", null, Collections.emptyMap()), + plainActionFuture + ); + + plainActionFuture.actionGet(); + assertThat(((TestRewriteCounterQueryBuilder) request.source().query()).asyncRewriteCount, equalTo(1)); + final ShardSearchContextId contextId = context.id(); + assertTrue(service.freeReaderContext(contextId)); + } + private ReaderContext createReaderContext(IndexService indexService, IndexShard indexShard) { return new ReaderContext( new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()), @@ -1871,4 +1920,74 @@ private ReaderContext createReaderContext(IndexService indexService, IndexShard false ); } + + private static class TestRewriteCounterQueryBuilder extends AbstractQueryBuilder { + + final int asyncRewriteCount; + final Supplier fetched; + + TestRewriteCounterQueryBuilder() { + asyncRewriteCount = 0; + fetched = null; + } + + private TestRewriteCounterQueryBuilder(int asyncRewriteCount, Supplier fetched) { + this.asyncRewriteCount = asyncRewriteCount; + this.fetched = fetched; + } + + @Override + public String getWriteableName() { + return "test_query"; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException {} + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException {} + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + return new MatchAllDocsQuery(); + } + + @Override + protected boolean doEquals(TestRewriteCounterQueryBuilder other) { + return true; + } + + @Override + protected int doHashCode() { + return 42; + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + if (asyncRewriteCount > 0) { + return this; + } + if (fetched != null) { + if (fetched.get() == 
null) { + return this; + } + assert fetched.get(); + return new TestRewriteCounterQueryBuilder(1, null); + } + if (queryRewriteContext.convertToDataRewriteContext() != null) { + SetOnce awaitingFetch = new SetOnce<>(); + queryRewriteContext.registerAsyncAction((c, l) -> { + awaitingFetch.set(true); + l.onResponse(null); + }); + return new TestRewriteCounterQueryBuilder(0, awaitingFetch::get); + } + return this; + } + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java index 5a8f8f28cfd8..f95734867378 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java @@ -500,7 +500,7 @@ public void testNested() throws IOException { assertThat(filters1.getBucketByKey("q1").getDocCount(), equalTo(1L)); }, new AggTestConfig(new FiltersAggregationBuilder("test", new KeyedFilter("q1", new TermQueryBuilder("author", "foo"))), ft) - .withQuery(Queries.newNonNestedFilter(IndexVersion.CURRENT)) + .withQuery(Queries.newNonNestedFilter(IndexVersion.current())) ); testCase(buildIndex, result -> { InternalFilters filters = (InternalFilters) result; @@ -508,7 +508,7 @@ public void testNested() throws IOException { assertThat(filters.getBucketByKey("q1").getDocCount(), equalTo(1L)); }, new AggTestConfig(new FiltersAggregationBuilder("test", new KeyedFilter("q1", new MatchAllQueryBuilder())), ft).withQuery( - Queries.newNonNestedFilter(IndexVersion.CURRENT) + Queries.newNonNestedFilter(IndexVersion.current()) ) ); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index 15a0c0342ee5..6324d40525cf 
100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -367,7 +367,7 @@ public void testResetRootDocId() throws Exception { MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(VALUE_FIELD_NAME, NumberFieldMapper.NumberType.LONG); BooleanQuery.Builder bq = new BooleanQuery.Builder(); - bq.add(Queries.newNonNestedFilter(IndexVersion.CURRENT), BooleanClause.Occur.MUST); + bq.add(Queries.newNonNestedFilter(IndexVersion.current()), BooleanClause.Occur.MUST); bq.add(new TermQuery(new Term(IdFieldMapper.NAME, Uid.encodeId("2"))), BooleanClause.Occur.MUST_NOT); InternalNested nested = searchAndReduce( @@ -644,7 +644,7 @@ public void testPreGetChildLeafCollectors() throws IOException { Filter filter = searchAndReduce( newIndexSearcher(indexReader), new AggTestConfig(filterAggregationBuilder, fieldType1, fieldType2).withQuery( - Queries.newNonNestedFilter(IndexVersion.CURRENT) + Queries.newNonNestedFilter(IndexVersion.current()) ) ); @@ -906,6 +906,6 @@ protected List objectMappers() { ); public static NestedObjectMapper nestedObject(String path) { - return new NestedObjectMapper.Builder(path, IndexVersion.CURRENT).build(MapperBuilderContext.root(false)); + return new NestedObjectMapper.Builder(path, IndexVersion.current()).build(MapperBuilderContext.root(false)); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index 1bffbbc5e0b2..dc34ae2d59a0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -1438,7 +1438,7 @@ public void testHeisenpig() throws IOException { 
StringTerms result = searchAndReduce( newIndexSearcher(indexReader), new AggTestConfig(terms, animalFieldType, nestedFieldType).withQuery( - Queries.newNonNestedFilter(IndexVersion.CURRENT) + Queries.newNonNestedFilter(IndexVersion.current()) ) ); assertThat(result.getBuckets().get(0).getKeyAsString(), equalTo("pig")); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java index 5cf8dade5354..5671bb975118 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java @@ -127,7 +127,7 @@ public void testQueryFiltering() throws IOException { writer.addDocument(Arrays.asList(new IntPoint(FIELD_NAME, point), new SortedNumericDocValuesField(FIELD_NAME, point))); } }, agg -> { - assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(filteredSample))); + assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(filteredSample), 0.2)); assertTrue(AggregationInspectionHelper.hasValue(agg)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java index 82bce09ae03d..0de84728e5f7 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java @@ -266,7 +266,7 @@ public void testMetadataFields() throws IOException { SeqNoFieldMapper.NAME, SourceFieldMapper.NAME, FieldNamesFieldMapper.NAME, - NestedPathFieldMapper.name(IndexVersion.CURRENT) + NestedPathFieldMapper.name(IndexVersion.current()) )) { 
expectThrows(UnsupportedOperationException.class, () -> fetchFields(mapperService, source, fieldname)); } diff --git a/server/src/test/java/org/elasticsearch/search/query/ScriptScoreQueryTests.java b/server/src/test/java/org/elasticsearch/search/query/ScriptScoreQueryTests.java index eeb0c066f762..6fff108cfb5c 100644 --- a/server/src/test/java/org/elasticsearch/search/query/ScriptScoreQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/ScriptScoreQueryTests.java @@ -88,7 +88,7 @@ public void testExplain() throws IOException { null, "index", 0, - IndexVersion.CURRENT + IndexVersion.current() ); Weight weight = query.createWeight(searcher, ScoreMode.COMPLETE, 1.0f); Explanation explanation = weight.explain(leafReaderContext, 0); @@ -109,7 +109,7 @@ public void testExplainDefault() throws IOException { null, "index", 0, - IndexVersion.CURRENT + IndexVersion.current() ); Weight weight = query.createWeight(searcher, ScoreMode.COMPLETE, 1.0f); Explanation explanation = weight.explain(leafReaderContext, 0); @@ -134,7 +134,7 @@ public void testExplainDefaultNoScore() throws IOException { null, "index", 0, - IndexVersion.CURRENT + IndexVersion.current() ); Weight weight = query.createWeight(searcher, ScoreMode.COMPLETE, 1.0f); Explanation explanation = weight.explain(leafReaderContext, 0); @@ -157,7 +157,7 @@ public void testScriptScoreErrorOnNegativeScore() { null, "index", 0, - IndexVersion.CURRENT + IndexVersion.current() ); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> searcher.search(query, 1)); assertTrue(e.getMessage().contains("Must be a non-negative score!")); diff --git a/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index 328af99689b6..a278a01854b7 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ 
b/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -187,7 +187,7 @@ protected final SearchExecutionContext createMockSearchExecutionContext(IndexSea Index index = new Index(randomAlphaOfLengthBetween(1, 10), "_na_"); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings( index, - Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.CURRENT.id()).build() + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current().id()).build() ); BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(idxSettings, mock(BitsetFilterCache.Listener.class)); BiFunction> indexFieldDataLookup = (fieldType, fdc) -> { @@ -195,7 +195,7 @@ protected final SearchExecutionContext createMockSearchExecutionContext(IndexSea return builder.build(new IndexFieldDataCache.None(), null); }; NestedLookup nestedLookup = NestedLookup.build( - List.of(new NestedObjectMapper.Builder("path", IndexVersion.CURRENT).build(MapperBuilderContext.root(false))) + List.of(new NestedObjectMapper.Builder("path", IndexVersion.current()).build(MapperBuilderContext.root(false))) ); return new SearchExecutionContext( 0, diff --git a/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java index a8badfaa711f..fdb43de69a66 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/InternalSnapshotsInfoServiceTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.repositories.FilterRepository; @@ -424,7 +425,7 @@ 
private ClusterState addUnassignedShards(final ClusterState currentState, String final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(random()), new Snapshot("_repo", new SnapshotId(randomAlphaOfLength(5), UUIDs.randomBase64UUID(random()))), - Version.CURRENT, + IndexVersion.current(), new IndexId(indexName, UUIDs.randomBase64UUID(random())) ); diff --git a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java index a1f39c4f16be..fee89d54d794 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.snapshots; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.cluster.metadata.DataStream; @@ -20,6 +19,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -207,7 +207,7 @@ private static SnapshotInfo createSnapshotInfo(Snapshot snapshot, Boolean includ List.of(), List.of(), randomAlphaOfLengthBetween(10, 100), - Version.CURRENT, + IndexVersion.current(), randomNonNegativeLong(), randomNonNegativeLong(), shards, diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 6b23545dc568..366bb13e609f 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ 
b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -1863,7 +1863,14 @@ protected void assertSnapshotOrGenericThread() { indicesService, clusterService, threadPool, - new PeerRecoveryTargetService(threadPool, transportService, recoverySettings, clusterService, snapshotFilesProvider), + new PeerRecoveryTargetService( + client, + threadPool, + transportService, + recoverySettings, + clusterService, + snapshotFilesProvider + ), shardStateAction, repositoriesService, searchService, diff --git a/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java b/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java index dcefd6313412..022961bab787 100644 --- a/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/BanFailureLoggingTests.java @@ -11,10 +11,10 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.tasks.TaskManagerTestCase; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -113,7 +113,7 @@ private void runTest( final MockTransportService parentTransportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ); @@ -124,7 +124,7 @@ private void runTest( final MockTransportService childTransportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ); diff --git 
a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java index 990f35011cdf..8cab2c2a0e2e 100644 --- a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.settings.AbstractScopedSettings; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -21,6 +22,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -60,11 +62,16 @@ public void tearDown() throws Exception { ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } - private MockTransportService startTransport(String id, Version version, TransportVersion transportVersion) { + private MockTransportService startTransport(String id, VersionInformation version, TransportVersion transportVersion) { return startTransport(id, version, transportVersion, Settings.EMPTY); } - public MockTransportService startTransport(String id, Version version, TransportVersion transportVersion, Settings settings) { + public MockTransportService startTransport( + String id, + VersionInformation version, + TransportVersion transportVersion, + Settings settings + ) { boolean success = false; final Settings s = Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), clusterAlias) @@ -85,12 +92,17 
@@ public MockTransportService startTransport(String id, Version version, Transport } public void testProxyStrategyWillOpenExpectedNumberOfConnectionsToAddress() { - try (MockTransportService transport1 = startTransport("node1", Version.CURRENT, TransportVersion.current())) { + try (MockTransportService transport1 = startTransport("node1", VersionInformation.CURRENT, TransportVersion.current())) { TransportAddress address1 = transport1.boundAddress().publishAddress(); try ( MockTransportService localService = spy( - MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, TransportVersion.current(), threadPool) + MockTransportService.createNewService( + Settings.EMPTY, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool + ) ) ) { localService.start(); @@ -145,8 +157,8 @@ public void testProxyStrategyWillOpenExpectedNumberOfConnectionsToAddress() { ) public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Exception { try ( - MockTransportService transport1 = startTransport("node1", Version.CURRENT, TransportVersion.current()); - MockTransportService transport2 = startTransport("node2", Version.CURRENT, TransportVersion.current()) + MockTransportService transport1 = startTransport("node1", VersionInformation.CURRENT, TransportVersion.current()); + MockTransportService transport2 = startTransport("node2", VersionInformation.CURRENT, TransportVersion.current()) ) { TransportAddress address1 = transport1.boundAddress().publishAddress(); TransportAddress address2 = transport2.boundAddress().publishAddress(); @@ -154,7 +166,7 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -220,7 +232,11 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti } public void 
testConnectFailsWithIncompatibleNodes() { - Version incompatibleVersion = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); + VersionInformation incompatibleVersion = new VersionInformation( + Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ); TransportVersion incompatibleTransportVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE); try (MockTransportService transport1 = startTransport("incompatible-node", incompatibleVersion, incompatibleTransportVersion)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); @@ -228,7 +244,7 @@ public void testConnectFailsWithIncompatibleNodes() { try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -276,13 +292,13 @@ public void testConnectFailsWithIncompatibleNodes() { } public void testConnectFailsWithNonRetryableException() { - try (MockTransportService transport1 = startTransport("remote", Version.CURRENT, TransportVersion.current())) { + try (MockTransportService transport1 = startTransport("remote", VersionInformation.CURRENT, TransportVersion.current())) { TransportAddress address1 = transport1.boundAddress().publishAddress(); try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -336,8 +352,13 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro Settings otherSettings = Settings.builder().put("cluster.name", "otherCluster").build(); try ( - MockTransportService transport1 = startTransport("cluster1", Version.CURRENT, TransportVersion.current()); - MockTransportService transport2 = startTransport("cluster2", Version.CURRENT, TransportVersion.current(), 
otherSettings) + MockTransportService transport1 = startTransport("cluster1", VersionInformation.CURRENT, TransportVersion.current()); + MockTransportService transport2 = startTransport( + "cluster2", + VersionInformation.CURRENT, + TransportVersion.current(), + otherSettings + ) ) { TransportAddress address1 = transport1.boundAddress().publishAddress(); TransportAddress address2 = transport2.boundAddress().publishAddress(); @@ -345,7 +366,7 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -407,7 +428,7 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro } public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception { - try (MockTransportService transport1 = startTransport("seed_node", Version.CURRENT, TransportVersion.current())) { + try (MockTransportService transport1 = startTransport("seed_node", VersionInformation.CURRENT, TransportVersion.current())) { TransportAddress address = transport1.boundAddress().publishAddress(); CountDownLatch multipleResolveLatch = new CountDownLatch(2); @@ -419,7 +440,7 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -459,13 +480,13 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception } public void testProxyStrategyWillNeedToBeRebuiltIfNumOfSocketsOrAddressesOrServerNameChange() { - try (MockTransportService remoteTransport = startTransport("node1", Version.CURRENT, TransportVersion.current())) { + try (MockTransportService remoteTransport = startTransport("node1", VersionInformation.CURRENT, 
TransportVersion.current())) { TransportAddress remoteAddress = remoteTransport.boundAddress().publishAddress(); try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -573,13 +594,15 @@ public void testModeSettingsCannotBeUsedWhenInDifferentMode() { public void testServerNameAttributes() { Settings bindSettings = Settings.builder().put(TransportSettings.BIND_HOST.getKey(), "localhost").build(); - try (MockTransportService transport1 = startTransport("node1", Version.CURRENT, TransportVersion.current(), bindSettings)) { + try ( + MockTransportService transport1 = startTransport("node1", VersionInformation.CURRENT, TransportVersion.current(), bindSettings) + ) { TransportAddress address1 = transport1.boundAddress().publishAddress(); try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java index 3067641af434..74395d4075ac 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchShardsAction; import org.elasticsearch.action.search.SearchShardsRequest; @@ -17,6 +16,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; +import 
org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -45,7 +45,13 @@ public void tearDown() throws Exception { } private MockTransportService startTransport(String id, List knownNodes) { - return RemoteClusterConnectionTests.startTransport(id, knownNodes, Version.CURRENT, TransportVersion.current(), threadPool); + return RemoteClusterConnectionTests.startTransport( + id, + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool + ); } public void testSearchShards() throws Exception { @@ -62,7 +68,7 @@ public void testSearchShards() throws Exception { try ( MockTransportService service = MockTransportService.createNewService( builder.build(), - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -110,7 +116,7 @@ public void testSearchShardsThreadContextHeader() { try ( MockTransportService service = MockTransportService.createNewService( builder.build(), - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index 9ba67ce4ae4d..980724567a53 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -8,13 +8,13 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import 
org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -53,7 +53,7 @@ public void testConnectAndExecuteRequest() throws Exception { MockTransportService remoteTransport = startTransport( "remote_node", Collections.emptyList(), - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, remoteSettings @@ -69,7 +69,7 @@ public void testConnectAndExecuteRequest() throws Exception { try ( MockTransportService service = MockTransportService.createNewService( localSettings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -106,7 +106,7 @@ public void testEnsureWeReconnect() throws Exception { MockTransportService remoteTransport = startTransport( "remote_node", Collections.emptyList(), - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, remoteSettings @@ -120,7 +120,7 @@ public void testEnsureWeReconnect() throws Exception { try ( MockTransportService service = MockTransportService.createNewService( localSettings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -157,7 +157,7 @@ public void testRemoteClusterServiceNotEnabled() { try ( MockTransportService service = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -180,7 +180,7 @@ public void testQuicklySkipUnavailableClusters() throws Exception { MockTransportService remoteTransport = startTransport( "remote_node", Collections.emptyList(), - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, remoteSettings @@ -197,7 +197,7 @@ public void testQuicklySkipUnavailableClusters() throws Exception { 
try ( MockTransportService service = MockTransportService.createNewService( localSettings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 4aa5f3a3f28b..5635e0700002 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -10,7 +10,6 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.remote.RemoteClusterNodesAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; @@ -29,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -94,7 +94,7 @@ public void tearDown() throws Exception { private MockTransportService startTransport( String id, List knownNodes, - Version version, + VersionInformation version, TransportVersion transportVersion ) { return startTransport(id, knownNodes, version, transportVersion, threadPool); @@ -103,7 +103,7 @@ private MockTransportService startTransport( public static MockTransportService startTransport( String id, List knownNodes, - Version version, + VersionInformation version, TransportVersion transportVersion, ThreadPool threadPool ) { @@ -113,7 +113,7 @@ public static MockTransportService startTransport( 
public static MockTransportService startTransport( final String id, final List knownNodes, - final Version version, + final VersionInformation version, final TransportVersion transportVersion, final ThreadPool threadPool, final Settings settings @@ -230,7 +230,7 @@ public void run() { try ( MockTransportService service = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -271,12 +271,22 @@ private static List addresses(final DiscoveryNode... seedNodes) { public void testCloseWhileConcurrentlyConnecting() throws IOException, InterruptedException, BrokenBarrierException { List knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); - MockTransportService seedTransport1 = startTransport("seed_node_1", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); + MockTransportService seedTransport1 = startTransport( + "seed_node_1", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService discoverableTransport = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -292,7 +302,7 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt try ( MockTransportService service = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -387,7 +397,7 @@ private void doTestGetConnectionInfo(boolean hasClusterCredentials) throws Excep MockTransportService transport1 = startTransport( "seed_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), 
threadPool, seedTransportSettings @@ -395,7 +405,7 @@ private void doTestGetConnectionInfo(boolean hasClusterCredentials) throws Excep MockTransportService transport2 = startTransport( "seed_node_1", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, seedTransportSettings @@ -403,7 +413,7 @@ private void doTestGetConnectionInfo(boolean hasClusterCredentials) throws Excep MockTransportService transport3 = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, seedTransportSettings @@ -427,7 +437,7 @@ private void doTestGetConnectionInfo(boolean hasClusterCredentials) throws Excep try ( MockTransportService service = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -596,7 +606,7 @@ private void doTestCollectNodes(boolean hasClusterCredentials) throws Exception MockTransportService seedTransport = startTransport( "seed_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, seedTransportSettings @@ -610,7 +620,7 @@ private void doTestCollectNodes(boolean hasClusterCredentials) throws Exception try ( MockTransportService service = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -666,13 +676,20 @@ private void doTestCollectNodes(boolean hasClusterCredentials) throws Exception public void testNoChannelsExceptREG() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); - try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current())) { + try ( + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ) + ) { DiscoveryNode 
seedNode = seedTransport.getLocalDiscoNode(); knownNodes.add(seedTransport.getLocalDiscoNode()); try ( MockTransportService service = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -720,7 +737,7 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted MockTransportService transportService = startTransport( "discoverable_node" + i, knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ); discoverableNodes.add(transportService.getLocalNode()); @@ -738,7 +755,7 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted try ( MockTransportService service = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -811,11 +828,16 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted public void testGetConnection() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService disconnectedTransport = startTransport( "disconnected_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -828,7 +850,7 @@ public void testGetConnection() throws Exception { try ( MockTransportService service = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 
9c86b8e5459d..0391e885bb0a 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.ActionTestUtils; @@ -16,6 +15,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.AbstractScopedSettings; import org.elasticsearch.common.settings.ClusterSettings; @@ -65,7 +65,7 @@ public void tearDown() throws Exception { private MockTransportService startTransport( String id, List knownNodes, - Version version, + VersionInformation version, TransportVersion transportVersion ) { return startTransport(id, knownNodes, version, transportVersion, Settings.EMPTY); @@ -74,7 +74,7 @@ private MockTransportService startTransport( private MockTransportService startTransport( final String id, final List knownNodes, - final Version version, + final VersionInformation version, final TransportVersion transportVersion, final Settings settings ) { @@ -128,13 +128,13 @@ public void testGroupClusterIndices() throws IOException { MockTransportService cluster1Transport = startTransport( "cluster_1_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ); MockTransportService cluster2Transport = startTransport( "cluster_2_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -147,7 +147,7 @@ public void testGroupClusterIndices() throws IOException { try ( 
MockTransportService transportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -209,13 +209,13 @@ public void testGroupIndices() throws IOException { MockTransportService cluster1Transport = startTransport( "cluster_1_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ); MockTransportService cluster2Transport = startTransport( "cluster_2_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -228,7 +228,7 @@ public void testGroupIndices() throws IOException { try ( MockTransportService transportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -319,13 +319,13 @@ public void testIncrementallyAddClusters() throws IOException { MockTransportService cluster1Transport = startTransport( "cluster_1_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ); MockTransportService cluster2Transport = startTransport( "cluster_2_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -338,7 +338,7 @@ public void testIncrementallyAddClusters() throws IOException { try ( MockTransportService transportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -395,7 +395,12 @@ public void testIncrementallyAddClusters() throws IOException { public void testDefaultPingSchedule() throws IOException { List knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService seedTransport = startTransport("cluster_1_node", knownNodes, Version.CURRENT, TransportVersion.current()) + MockTransportService seedTransport = startTransport( + "cluster_1_node", + knownNodes, + 
VersionInformation.CURRENT, + TransportVersion.current() + ) ) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); knownNodes.add(seedTransport.getLocalDiscoNode()); @@ -412,7 +417,7 @@ public void testDefaultPingSchedule() throws IOException { try ( MockTransportService transportService = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -443,13 +448,13 @@ public void testCustomPingSchedule() throws IOException { MockTransportService cluster1Transport = startTransport( "cluster_1_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ); MockTransportService cluster2Transport = startTransport( "cluster_2_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -467,7 +472,7 @@ public void testCustomPingSchedule() throws IOException { try ( MockTransportService transportService = MockTransportService.createNewService( transportSettings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -502,7 +507,7 @@ public void testChangeSettings() throws Exception { MockTransportService cluster1Transport = startTransport( "cluster_1_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -513,7 +518,7 @@ public void testChangeSettings() throws Exception { try ( MockTransportService transportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -560,16 +565,32 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { final List knownNodes = new CopyOnWriteArrayList<>(); final Settings gateway = Settings.builder().put("node.attr.gateway", true).build(); try ( - MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, 
TransportVersion.current()); + MockTransportService c1N1 = startTransport( + "cluster_1_node_1", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService c1N2 = startTransport( "cluster_1_node_2", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), gateway ); - MockTransportService c2N1 = startTransport("cluster_2_node_1", knownNodes, Version.CURRENT, TransportVersion.current()); - MockTransportService c2N2 = startTransport("cluster_2_node_2", knownNodes, Version.CURRENT, TransportVersion.current(), gateway) + MockTransportService c2N1 = startTransport( + "cluster_2_node_1", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); + MockTransportService c2N2 = startTransport( + "cluster_2_node_2", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current(), + gateway + ) ) { final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode(); final DiscoveryNode c1N2Node = c1N2.getLocalDiscoNode(); @@ -584,7 +605,7 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { try ( MockTransportService transportService = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -638,19 +659,31 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { MockTransportService c1N1 = startTransport( "cluster_1_node_1", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), dedicatedMaster ); - MockTransportService c1N2 = startTransport("cluster_1_node_2", knownNodes, Version.CURRENT, TransportVersion.current(), data); + MockTransportService c1N2 = startTransport( + "cluster_1_node_2", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current(), + data + ); MockTransportService c2N1 = startTransport( "cluster_2_node_1", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, 
TransportVersion.current(), dedicatedMaster ); - MockTransportService c2N2 = startTransport("cluster_2_node_2", knownNodes, Version.CURRENT, TransportVersion.current(), data) + MockTransportService c2N2 = startTransport( + "cluster_2_node_2", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current(), + data + ) ) { final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode(); final DiscoveryNode c1N2Node = c1N2.getLocalDiscoNode(); @@ -665,7 +698,7 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { try ( MockTransportService transportService = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -723,28 +756,28 @@ public void testCollectNodes() throws InterruptedException, IOException { MockTransportService c1N1 = startTransport( "cluster_1_node_1", knownNodes_c1, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), settings ); MockTransportService c1N2 = startTransport( "cluster_1_node_2", knownNodes_c1, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), settings ); MockTransportService c2N1 = startTransport( "cluster_2_node_1", knownNodes_c2, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), settings ); MockTransportService c2N2 = startTransport( "cluster_2_node_2", knownNodes_c2, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), settings ) @@ -763,7 +796,7 @@ public void testCollectNodes() throws InterruptedException, IOException { try ( MockTransportService transportService = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -999,8 +1032,18 @@ public void testRemoteClusterSkipIfDisconnectedSetting() { public void testReconnectWhenStrategySettingsUpdated() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try ( - 
MockTransportService cluster_node_0 = startTransport("cluster_node_0", knownNodes, Version.CURRENT, TransportVersion.current()); - MockTransportService cluster_node_1 = startTransport("cluster_node_1", knownNodes, Version.CURRENT, TransportVersion.current()) + MockTransportService cluster_node_0 = startTransport( + "cluster_node_0", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); + MockTransportService cluster_node_1 = startTransport( + "cluster_node_1", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ) ) { final DiscoveryNode node0 = cluster_node_0.getLocalDiscoNode(); @@ -1012,7 +1055,7 @@ public void testReconnectWhenStrategySettingsUpdated() throws Exception { try ( MockTransportService transportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -1092,7 +1135,14 @@ public static void addConnectionListener(RemoteClusterService service, Transport public void testSkipUnavailable() { List knownNodes = new CopyOnWriteArrayList<>(); - try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current())) { + try ( + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ) + ) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); knownNodes.add(seedNode); Settings.Builder builder = Settings.builder(); @@ -1100,7 +1150,7 @@ public void testSkipUnavailable() { try ( MockTransportService service = MockTransportService.createNewService( builder.build(), - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -1127,7 +1177,7 @@ public void testRemoteClusterServiceNotEnabledGetRemoteClusterConnection() { try ( MockTransportService service = MockTransportService.createNewService( settings, - Version.CURRENT, + 
VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -1148,7 +1198,7 @@ public void testRemoteClusterServiceNotEnabledGetCollectNodes() { try ( MockTransportService service = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null @@ -1170,14 +1220,14 @@ public void testUseDifferentTransportProfileForCredentialsProtectedRemoteCluster MockTransportService c1 = startTransport( "cluster_1", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), Settings.builder() .put(RemoteClusterPortSettings.REMOTE_CLUSTER_SERVER_ENABLED.getKey(), "true") .put(RemoteClusterPortSettings.PORT.getKey(), "0") .build() ); - MockTransportService c2 = startTransport("cluster_2", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService c2 = startTransport("cluster_2", knownNodes, VersionInformation.CURRENT, TransportVersion.current()); ) { final DiscoveryNode c1Node = c1.getLocalDiscoNode().withTransportAddress(c1.boundRemoteAccessAddress().publishAddress()); final DiscoveryNode c2Node = c2.getLocalDiscoNode(); @@ -1188,7 +1238,7 @@ public void testUseDifferentTransportProfileForCredentialsProtectedRemoteCluster try ( MockTransportService transportService = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null diff --git a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java index 9b30b20e24b1..4cc4911cc12f 100644 --- a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import 
org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.AbstractScopedSettings; import org.elasticsearch.common.settings.ClusterSettings; @@ -30,6 +31,7 @@ import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.VersionUtils; @@ -91,7 +93,7 @@ public void tearDown() throws Exception { private MockTransportService startTransport( String id, List knownNodes, - Version version, + VersionInformation version, TransportVersion transportVersion ) { return startTransport(id, knownNodes, version, transportVersion, Settings.EMPTY); @@ -100,7 +102,7 @@ private MockTransportService startTransport( public MockTransportService startTransport( final String id, final List knownNodes, - final Version version, + final VersionInformation version, final TransportVersion transportVersion, final Settings settings ) { @@ -150,11 +152,16 @@ public MockTransportService startTransport( public void testSniffStrategyWillConnectToAndDiscoverNodes() { List knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService discoverableTransport = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -167,7 +174,7 @@ public void testSniffStrategyWillConnectToAndDiscoverNodes() { try ( MockTransportService localService = 
MockTransportService.createNewService( clientSettings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -210,11 +217,16 @@ public void testSniffStrategyWillConnectToAndDiscoverNodes() { public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService discoverableTransport = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -233,7 +245,7 @@ public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Excep try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -277,17 +289,22 @@ public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Excep public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOnDisconnect() throws Exception { List knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService discoverableTransport1 = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ); MockTransportService discoverableTransport2 = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) 
{ @@ -302,7 +319,7 @@ public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOn try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -353,10 +370,19 @@ public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOn public void testDiscoverWithSingleIncompatibleSeedNode() { List knownNodes = new CopyOnWriteArrayList<>(); - Version incompatibleVersion = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); + VersionInformation incompatibleVersion = new VersionInformation( + Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ); TransportVersion incompatibleTransportVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE); try ( - MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService incompatibleSeedTransport = startTransport( "discoverable_node", knownNodes, @@ -366,7 +392,7 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { MockTransportService discoverableTransport = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -381,7 +407,7 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -423,7 +449,11 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { public void testConnectFailsWithIncompatibleNodes() { List knownNodes = 
new CopyOnWriteArrayList<>(); - Version incompatibleVersion = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); + VersionInformation incompatibleVersion = new VersionInformation( + Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ); TransportVersion incompatibleTransportVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE); try ( MockTransportService incompatibleSeedTransport = startTransport( @@ -439,7 +469,7 @@ public void testConnectFailsWithIncompatibleNodes() { try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -479,11 +509,16 @@ public void testConnectFailsWithIncompatibleNodes() { public void testFilterNodesWithNodePredicate() { List knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService discoverableTransport = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -497,7 +532,7 @@ public void testFilterNodesWithNodePredicate() { try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -543,11 +578,16 @@ public void testFilterNodesWithNodePredicate() { public void testConnectFailsIfNoConnectionsOpened() { List knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); + 
MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService closedTransport = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -559,7 +599,7 @@ public void testConnectFailsIfNoConnectionsOpened() { try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -603,24 +643,24 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro Settings otherSettings = Settings.builder().put("cluster.name", "otherCluster").build(); try ( - MockTransportService seed = startTransport("other_seed", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService seed = startTransport("other_seed", knownNodes, VersionInformation.CURRENT, TransportVersion.current()); MockTransportService discoverable = startTransport( "other_discoverable", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ); MockTransportService otherSeed = startTransport( "other_seed", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), otherSettings ); MockTransportService otherDiscoverable = startTransport( "other_discoverable", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), otherSettings ) @@ -637,7 +677,7 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -704,11 +744,16 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro public void testMultipleCallsToConnectEnsuresConnection() { List 
knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService discoverableTransport = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -721,7 +766,7 @@ public void testMultipleCallsToConnectEnsuresConnection() { try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -775,16 +820,21 @@ public void testMultipleCallsToConnectEnsuresConnection() { public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { List knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService accessible = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService accessible = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService unresponsive1 = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ); MockTransportService unresponsive2 = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -816,7 +866,7 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) @@ -878,11 +928,16 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { public void 
testSniffStrategyWillNeedToBeRebuiltIfNumOfConnectionsOrSeedsOrProxyChange() { List knownNodes = new CopyOnWriteArrayList<>(); try ( - MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT, TransportVersion.current()); + MockTransportService seedTransport = startTransport( + "seed_node", + knownNodes, + VersionInformation.CURRENT, + TransportVersion.current() + ); MockTransportService discoverableTransport = startTransport( "discoverable_node", knownNodes, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current() ) ) { @@ -895,7 +950,7 @@ public void testSniffStrategyWillNeedToBeRebuiltIfNumOfConnectionsOrSeedsOrProxy try ( MockTransportService localService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ) diff --git a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java index 9887320df9dd..3b1d37398f91 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java @@ -12,12 +12,14 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.RefCounted; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancellationService; @@ -46,13 +48,21 @@ public class 
TransportActionProxyTests extends ESTestCase { protected ThreadPool threadPool; // we use always a non-alpha or beta version here otherwise minimumCompatibilityVersion will be different for the two used versions private static final Version CURRENT_VERSION = Version.fromString(String.valueOf(Version.CURRENT.major) + ".0.0"); - protected static final Version version0 = CURRENT_VERSION.minimumCompatibilityVersion(); + protected static final VersionInformation version0 = new VersionInformation( + CURRENT_VERSION.minimumCompatibilityVersion(), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ); protected static final TransportVersion transportVersion0 = TransportVersion.MINIMUM_COMPATIBLE; protected DiscoveryNode nodeA; protected MockTransportService serviceA; - protected static final Version version1 = Version.fromId(CURRENT_VERSION.id + 1); + protected static final VersionInformation version1 = new VersionInformation( + Version.fromId(CURRENT_VERSION.id + 1), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ); protected static final TransportVersion transportVersion1 = TransportVersion.fromId(TransportVersion.current().id() + 1); protected DiscoveryNode nodeB; protected MockTransportService serviceB; @@ -87,7 +97,7 @@ public void tearDown() throws Exception { IOUtils.close(serviceA, serviceB, serviceC, serviceD, () -> { terminate(threadPool); }); } - private MockTransportService buildService(Version version, TransportVersion transportVersion) { + private MockTransportService buildService(VersionInformation version, TransportVersion transportVersion) { MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, version, transportVersion, threadPool, null); service.start(); service.acceptIncomingRequests(); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java index 
b47711cc1498..6f750b7d70bf 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java @@ -15,12 +15,14 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; @@ -62,7 +64,7 @@ private TransportService startServices( String nodeNameAndId, Settings settings, TransportVersion transportVersion, - Version nodeVersion, + VersionInformation nodeVersion, TransportInterceptor transportInterceptor ) { TcpTransport transport = new Netty4Transport( @@ -119,14 +121,18 @@ public void testConnectToNodeLight() { "TS_A", settings, TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, TransportService.NOOP_TRANSPORT_INTERCEPTOR ); TransportService transportServiceB = startServices( "TS_B", settings, TransportVersionUtils.randomCompatibleVersion(random()), - VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT), + new VersionInformation( + VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ), TransportService.NOOP_TRANSPORT_INTERCEPTOR ); DiscoveryNode discoveryNode = 
DiscoveryNodeUtils.builder("") @@ -156,14 +162,14 @@ public void testMismatchedClusterName() { "TS_A", Settings.builder().put("cluster.name", "a").build(), TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, TransportService.NOOP_TRANSPORT_INTERCEPTOR ); TransportService transportServiceB = startServices( "TS_B", Settings.builder().put("cluster.name", "b").build(), TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, TransportService.NOOP_TRANSPORT_INTERCEPTOR ); DiscoveryNode discoveryNode = DiscoveryNodeUtils.builder("") @@ -195,14 +201,18 @@ public void testIncompatibleNodeVersions() { "TS_A", settings, TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, TransportService.NOOP_TRANSPORT_INTERCEPTOR ); TransportService transportServiceB = startServices( "TS_B", settings, TransportVersion.MINIMUM_COMPATIBLE, - VersionUtils.getPreviousVersion(Version.CURRENT.minimumCompatibilityVersion()), + new VersionInformation( + VersionUtils.getPreviousVersion(Version.CURRENT.minimumCompatibilityVersion()), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ), TransportService.NOOP_TRANSPORT_INTERCEPTOR ); DiscoveryNode discoveryNode = DiscoveryNodeUtils.builder("") @@ -242,14 +252,14 @@ public void testIncompatibleTransportVersions() { "TS_A", settings, TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, TransportService.NOOP_TRANSPORT_INTERCEPTOR ); TransportService transportServiceB = startServices( "TS_B", settings, TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE), - Version.CURRENT.minimumCompatibilityVersion(), + new VersionInformation(Version.CURRENT.minimumCompatibilityVersion(), IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()), TransportService.NOOP_TRANSPORT_INTERCEPTOR ); DiscoveryNode discoveryNode = DiscoveryNodeUtils.builder("") @@ -278,14 +288,14 @@ public void testNodeConnectWithDifferentNodeId() { "TS_A", settings, 
TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, TransportService.NOOP_TRANSPORT_INTERCEPTOR ); TransportService transportServiceB = startServices( "TS_B", settings, TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, TransportService.NOOP_TRANSPORT_INTERCEPTOR ); DiscoveryNode discoveryNode = DiscoveryNodeUtils.builder(randomAlphaOfLength(10)) @@ -314,14 +324,14 @@ public void testRejectsMismatchedBuildHash() { "TS_A", settings, TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, transportInterceptorA ); final TransportService transportServiceB = startServices( "TS_B", settings, TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, transportInterceptorB ); final DiscoveryNode discoveryNode = DiscoveryNodeUtils.builder("") @@ -365,14 +375,14 @@ public void testAcceptsMismatchedServerlessBuildHash() { "TS_A", settings, TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, transportInterceptorA ); final TransportService transportServiceB = startServices( "TS_B", settings, TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, transportInterceptorB ); AbstractSimpleTransportTestCase.connectToNode(transportServiceA, transportServiceB.getLocalNode(), TestProfiles.LIGHT_PROFILE); @@ -391,14 +401,14 @@ public void testAcceptsMismatchedBuildHashFromDifferentVersion() { "TS_A", Settings.builder().put("cluster.name", "a").build(), TransportVersion.current(), - Version.CURRENT, + VersionInformation.CURRENT, transportInterceptorA ); final TransportService transportServiceB = startServices( "TS_B", Settings.builder().put("cluster.name", "a").build(), TransportVersion.MINIMUM_COMPATIBLE, - Version.CURRENT.minimumCompatibilityVersion(), + new VersionInformation(Version.CURRENT.minimumCompatibilityVersion(), IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()), transportInterceptorB ); 
AbstractSimpleTransportTestCase.connectToNode(transportServiceA, transportServiceB.getLocalNode(), TestProfiles.LIGHT_PROFILE); diff --git a/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekTrackerPlugin.java b/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekTrackerPlugin.java index bff54e9e9cf8..80a06c55f234 100644 --- a/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekTrackerPlugin.java +++ b/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekTrackerPlugin.java @@ -24,6 +24,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -75,7 +76,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { return Collections.singletonList(seekStatsService); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 452b0e5b565c..a5a9bf30f5c7 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -477,7 +477,7 @@ public static MetadataRolloverService getMetadataRolloverService( null, ScriptCompiler.NONE, false, - IndexVersion.CURRENT + IndexVersion.current() ).build(MapperBuilderContext.root(false)); ClusterService 
clusterService = ClusterServiceUtils.createClusterService(testThreadPool); Environment env = mock(Environment.class); @@ -495,7 +495,7 @@ public static MetadataRolloverService getMetadataRolloverService( DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, ScriptCompiler.NONE, true, - IndexVersion.CURRENT + IndexVersion.current() ) ); MetadataFieldMapper dtfm = getDataStreamTimestampFieldMapper(); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java index 9514363ee34c..8306fded6c29 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java @@ -8,9 +8,9 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; @@ -321,7 +321,7 @@ public static RecoverySource randomRecoverySource() { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("repo", new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId("some_index", UUIDs.randomBase64UUID(random())) ) ); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 1c25253b2f3a..a7a6fdf098af 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -42,7 +42,7 @@ private static SearchExecutionContext 
createMockSearchExecutionContext(boolean a when(searchExecutionContext.isSourceEnabled()).thenReturn(true); SearchLookup searchLookup = mock(SearchLookup.class); when(searchExecutionContext.lookup()).thenReturn(searchLookup); - when(searchExecutionContext.indexVersionCreated()).thenReturn(IndexVersion.CURRENT); + when(searchExecutionContext.indexVersionCreated()).thenReturn(IndexVersion.current()); return searchExecutionContext; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 833321ccf1a0..3f9c0fae4c15 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -155,7 +155,7 @@ protected final MapperService createMapperService(XContentBuilder mappings) thro } protected IndexVersion getVersion() { - return IndexVersion.CURRENT; + return IndexVersion.current(); } protected final MapperService createMapperService(Settings settings, XContentBuilder mappings) throws IOException { @@ -173,7 +173,7 @@ protected final MapperService createMapperService(String mappings) throws IOExce } protected final MapperService createMapperService(Settings settings, String mappings) throws IOException { - MapperService mapperService = createMapperService(IndexVersion.CURRENT, settings, () -> true, mapping(b -> {})); + MapperService mapperService = createMapperService(IndexVersion.current(), settings, () -> true, mapping(b -> {})); merge(mapperService, mappings); return mapperService; } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index 1f3989a0f7f9..ed1c59f9b75c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -975,7 +975,7 @@ public final void testMinimalIsInvalidInRoutingPath() throws IOException { MapperService mapper = createMapperService(fieldMapping(this::minimalMapping)); try { IndexSettings settings = createIndexSettings( - IndexVersion.CURRENT, + IndexVersion.current(), Settings.builder() .put(IndexSettings.MODE.getKey(), "time_series") .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "field") diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java index 4ffe7e897d8f..d8cf644e8710 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MetadataMapperTestCase.java @@ -166,7 +166,7 @@ public void testTypeAndFriendsAreAcceptedBefore_8_6_0() throws IOException { public void testTypeAndFriendsAreDeprecatedFrom_8_6_0() throws IOException { assumeTrue("Metadata field " + fieldName() + " isn't configurable", isConfigurable()); - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersion.V_8_6_0, IndexVersion.CURRENT); + IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersion.V_8_6_0, IndexVersion.current()); assumeTrue("Metadata field " + fieldName() + " is not supported on version " + version, isSupportedOn(version)); MapperService mapperService = createMapperService(version, mapping(b -> {})); // these parameters were previously silently ignored, they are now deprecated in new indices diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/TestDocumentParserContext.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/TestDocumentParserContext.java index 8cda663e5f00..4e953d02e4d8 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/index/mapper/TestDocumentParserContext.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/TestDocumentParserContext.java @@ -53,12 +53,12 @@ private TestDocumentParserContext(MappingLookup mappingLookup, SourceToParse sou s -> null, s -> null, s -> null, - IndexVersion.CURRENT, + IndexVersion.current(), () -> TransportVersion.current(), () -> null, null, (type, name) -> Lucene.STANDARD_ANALYZER, - MapperTestCase.createIndexSettings(IndexVersion.CURRENT, Settings.EMPTY), + MapperTestCase.createIndexSettings(IndexVersion.current(), Settings.EMPTY), null ), source, diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 409ff529c9e1..ef75f81d36ee 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.cache.IndexCache; @@ -1032,7 +1033,7 @@ public static boolean recoverFromStore(IndexShard newShard) { /** Recover a shard from a snapshot using a given repository **/ protected void recoverShardFromSnapshot(final IndexShard shard, final Snapshot snapshot, final Repository repository) { - final Version version = Version.CURRENT; + final IndexVersion version = IndexVersion.current(); final ShardId shardId = shard.shardId(); final IndexId indexId = new IndexId(shardId.getIndex().getName(), shardId.getIndex().getUUID()); final DiscoveryNode node = getFakeDiscoNode(shard.routingEntry().currentNodeId()); diff --git 
a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java index d9cc17e88adb..db5ccba2ba6a 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java @@ -68,6 +68,10 @@ public static String randomExistingFieldName(Random random, IngestDocument inges while (randomEntry.getValue() instanceof Map) { @SuppressWarnings("unchecked") Map map = (Map) randomEntry.getValue(); + // we have reached an empty map hence the max depth we can reach + if (map.isEmpty()) { + break; + } Map treeMap = new TreeMap<>(map); randomEntry = RandomPicks.randomFrom(random, treeMap.entrySet()); key += "." + randomEntry.getKey(); diff --git a/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java index 3e71504fbb20..6c08ff43033e 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java +++ b/test/framework/src/main/java/org/elasticsearch/search/FailBeforeCurrentVersionQueryBuilder.java @@ -10,7 +10,6 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.xcontent.XContentParser; @@ -23,6 +22,7 @@ public class FailBeforeCurrentVersionQueryBuilder extends DummyQueryBuilder { public static final String NAME = "fail_before_current_version"; + public static final int FUTURE_VERSION = TransportVersion.current().id() + 11_111; public FailBeforeCurrentVersionQueryBuilder(StreamInput in) throws IOException { super(in); @@ -30,15 +30,6 @@ public 
FailBeforeCurrentVersionQueryBuilder(StreamInput in) throws IOException { public FailBeforeCurrentVersionQueryBuilder() {} - @Override - protected void doWriteTo(StreamOutput out) { - if (out.getTransportVersion().before(TransportVersion.current())) { - throw new IllegalArgumentException( - "This query isn't serializable with transport versions before " + TransportVersion.current() - ); - } - } - public static DummyQueryBuilder fromXContent(XContentParser parser) throws IOException { DummyQueryBuilder.fromXContent(parser); return new FailBeforeCurrentVersionQueryBuilder(); @@ -53,4 +44,11 @@ public String getWriteableName() { protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { return this; } + + @Override + public TransportVersion getMinimalSupportedVersion() { + // this is what causes the failure - it always reports a version in the future, so it is never compatible with + // current or minimum CCS TransportVersion + return new TransportVersion(FUTURE_VERSION); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 2bc171ee5b6c..35893c8464f9 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -1221,7 +1221,7 @@ private static class MockParserContext extends MappingParserContext { null, null, null, - IndexVersion.CURRENT, + IndexVersion.current(), () -> TransportVersion.current(), null, ScriptCompiler.NONE, diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java index d128a65acb14..cd54a72ccf36 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java @@ -391,6 +391,7 @@ public void testQueryWithinMultiLine() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96999") public void testQueryLinearRing() throws Exception { createMapping(defaultIndexName, defaultFieldName); ensureGreen(); diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 2380d51caf1a..5df7537cb69e 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.FinalizeSnapshotContext; import org.elasticsearch.repositories.RepositoriesService; @@ -367,6 +368,15 @@ protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) t } } + private static String versionString(Version version) { + if (version.before(Version.V_8_9_0)) { + // add back the "" for a json String + return "\"" + version + "\""; + } else { + return version.indexVersion.toString(); + } + } + /** * Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard * generations (the existence of which would short-circuit checks for the repo containing old version snapshots) @@ -388,7 +398,7 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, Version final RepositoryData downgradedRepoData = 
RepositoryData.snapshotsFromXContent( JsonXContent.jsonXContent.createParser( XContentParserConfiguration.EMPTY, - Strings.toString(jsonBuilder).replace(Version.CURRENT.toString(), version.toString()) + Strings.toString(jsonBuilder).replace(IndexVersion.current().toString(), versionString(version)) ), repositoryData.getGenId(), randomBoolean() @@ -403,7 +413,7 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, Version JsonXContent.jsonXContent.createParser( XContentParserConfiguration.EMPTY, Strings.toString(snapshotInfo, ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS) - .replace(String.valueOf(Version.CURRENT.id), String.valueOf(version.id)) + .replace(String.valueOf(IndexVersion.current().id()), String.valueOf(version.id)) ) ); final BlobStoreRepository blobStoreRepository = getRepositoryOnMaster(repoName); @@ -521,7 +531,7 @@ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map nowInMillis); + } + ScriptModule createScriptModule(List scriptPlugins) { if (scriptPlugins == null || scriptPlugins.isEmpty()) { return new ScriptModule(Settings.EMPTY, singletonList(new ScriptPlugin() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index b0d4e0dfd4d5..4872163b0083 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1910,7 +1910,7 @@ public String toString() { * @param message an additional message or link with information on the fix */ protected void skipTestWaitingForLuceneFix(org.apache.lucene.util.Version luceneVersionWithFix, String message) { - final boolean currentVersionHasFix = IndexVersion.CURRENT.luceneVersion().onOrAfter(luceneVersionWithFix); + final boolean currentVersionHasFix = IndexVersion.current().luceneVersion().onOrAfter(luceneVersionWithFix); assumeTrue("Skipping test as it is waiting on 
a Lucene fix: " + message, currentVersionHasFix); fail("Remove call of skipTestWaitingForLuceneFix in " + RandomizedTest.getContext().getTargetMethod()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java index 298dae9d3e32..a130ee136d0b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -88,7 +89,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { return Collections.singletonList(listener); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/compiler/InMemoryJavaCompiler.java b/test/framework/src/main/java/org/elasticsearch/test/compiler/InMemoryJavaCompiler.java index 19452d72ad82..a8e4b925a6f0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/compiler/InMemoryJavaCompiler.java +++ b/test/framework/src/main/java/org/elasticsearch/test/compiler/InMemoryJavaCompiler.java @@ -104,6 +104,10 @@ private static class FileManagerWrapper extends ForwardingJavaFileManager getFiles() { + return this.files; + } + @Override public JavaFileObject getJavaFileForOutput(Location location, String className, Kind kind, FileObject sibling) throws IOException { return files.stream() @@ 
-129,11 +133,15 @@ static Supplier newIOException(String className, List compile(Map sources, String... options) { var files = sources.entrySet().stream().map(e -> new InMemoryJavaFileObject(e.getKey(), e.getValue())).toList(); - CompilationTask task = getCompilationTask(files, options); - - boolean result = PrivilegedOperations.compilationTaskCall(task); - if (result == false) { - throw new RuntimeException("Could not compile " + sources.entrySet().stream().toList()); + try (FileManagerWrapper wrapper = new FileManagerWrapper(files)) { + CompilationTask task = getCompilationTask(wrapper, options); + + boolean result = PrivilegedOperations.compilationTaskCall(task); + if (result == false) { + throw new RuntimeException("Could not compile " + sources.entrySet().stream().toList()); + } + } catch (IOException e) { + throw new RuntimeException("Could not close file manager for " + sources.entrySet().stream().toList()); } return files.stream().collect(Collectors.toMap(InMemoryJavaFileObject::getClassName, InMemoryJavaFileObject::getByteCode)); @@ -150,13 +158,16 @@ public static Map compile(Map sources, Str */ public static byte[] compile(String className, CharSequence sourceCode, String... 
options) { InMemoryJavaFileObject file = new InMemoryJavaFileObject(className, sourceCode); - CompilationTask task = getCompilationTask(file, options); - - boolean result = PrivilegedOperations.compilationTaskCall(task); - if (result == false) { - throw new RuntimeException("Could not compile " + className + " with source code " + sourceCode); + try (FileManagerWrapper wrapper = new FileManagerWrapper(file)) { + CompilationTask task = getCompilationTask(wrapper, options); + + boolean result = PrivilegedOperations.compilationTaskCall(task); + if (result == false) { + throw new RuntimeException("Could not compile " + className + " with source code " + sourceCode); + } + } catch (IOException e) { + throw new RuntimeException("Could not close file handler for class " + className + " with source code " + sourceCode); } - return file.getByteCode(); } @@ -164,11 +175,7 @@ private static JavaCompiler getCompiler() { return ToolProvider.getSystemJavaCompiler(); } - private static CompilationTask getCompilationTask(List files, String... options) { - return getCompiler().getTask(null, new FileManagerWrapper(files), null, List.of(options), null, files); - } - - private static CompilationTask getCompilationTask(InMemoryJavaFileObject file, String... options) { - return getCompiler().getTask(null, new FileManagerWrapper(file), null, List.of(options), null, List.of(file)); + private static CompilationTask getCompilationTask(FileManagerWrapper wrapper, String... 
options) { + return getCompiler().getTask(null, wrapper, null, List.of(options), null, wrapper.getFiles()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java index 3a4e13562bd9..c69580b95827 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/index/IndexVersionUtils.java @@ -74,8 +74,8 @@ public static IndexVersion randomVersionBetween(Random random, @Nullable IndexVe } public static IndexVersion getPreviousVersion() { - IndexVersion version = getPreviousVersion(IndexVersion.CURRENT); - assert version.before(IndexVersion.CURRENT); + IndexVersion version = getPreviousVersion(IndexVersion.current()); + assert version.before(IndexVersion.current()); return version; } @@ -108,9 +108,9 @@ public static IndexVersion getNextVersion(IndexVersion version) { return ALL_VERSIONS.get(place); } - /** Returns a random {@code IndexVersion} that is compatible with {@link IndexVersion#CURRENT} */ + /** Returns a random {@code IndexVersion} that is compatible with {@link IndexVersion#current()} */ public static IndexVersion randomCompatibleVersion(Random random) { - return randomVersionBetween(random, IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.CURRENT); + return randomVersionBetween(random, IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()); } /** Returns a random {@code IndexVersion} that is compatible with the previous version to {@code version} */ diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java index 6a0d74160a5a..e15a4e4ec3ec 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java @@ -40,7 
+40,7 @@ public abstract class RestActionTestCase extends ESTestCase { @Before public void setUpController() { verifyingClient = new VerifyingClient(this.getTestName()); - controller = new RestController(null, verifyingClient, new NoneCircuitBreakerService(), new UsageService(), Tracer.NOOP, false); + controller = new RestController(null, verifyingClient, new NoneCircuitBreakerService(), new UsageService(), Tracer.NOOP); } @After diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 5949df610c70..7a96b6a1c272 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -99,7 +100,7 @@ public List> getSettings() { public static MockTransportService createNewService( Settings settings, - Version version, + VersionInformation version, TransportVersion transportVersion, ThreadPool threadPool ) { @@ -108,7 +109,7 @@ public static MockTransportService createNewService( public static MockTransportService createNewService( Settings settings, - Version version, + VersionInformation version, TransportVersion transportVersion, ThreadPool threadPool, @Nullable ClusterSettings clusterSettings @@ -143,7 +144,7 @@ public static TcpTransport newMockTransport(Settings settings, TransportVersion public static MockTransportService createNewService( Settings settings, Transport transport, - Version version, + 
VersionInformation version, ThreadPool threadPool, @Nullable ClusterSettings clusterSettings, Set taskHeaders @@ -154,7 +155,7 @@ public static MockTransportService createNewService( public static MockTransportService createNewService( Settings settings, Transport transport, - Version version, + VersionInformation version, ThreadPool threadPool, @Nullable ClusterSettings clusterSettings, Set taskHeaders, diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 6ab16e4bb17d..d3add5fc9998 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -44,6 +45,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; @@ -115,14 +117,22 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { protected ThreadPool threadPool; // we use always a non-alpha or beta version here otherwise minimumCompatibilityVersion will be different for the two used versions - protected static final Version version0 = Version.fromString(String.valueOf(Version.CURRENT.major) + ".0.0"); + protected static final 
VersionInformation version0 = new VersionInformation( + Version.fromString(String.valueOf(Version.CURRENT.major) + ".0.0"), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ); protected static final TransportVersion transportVersion0 = TransportVersion.current(); protected volatile DiscoveryNode nodeA; protected volatile MockTransportService serviceA; protected ClusterSettings clusterSettingsA; - protected static final Version version1 = Version.fromId(version0.id + 1); + protected static final VersionInformation version1 = new VersionInformation( + Version.fromId(version0.nodeVersion().id + 1), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ); protected static final TransportVersion transportVersion1 = TransportVersion.fromId(transportVersion0.id() + 1); protected volatile DiscoveryNode nodeB; protected volatile MockTransportService serviceB; @@ -199,7 +209,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti private MockTransportService buildService( String name, - Version version, + VersionInformation version, TransportVersion transportVersion, @Nullable ClusterSettings clusterSettings, Settings settings, @@ -235,7 +245,7 @@ private MockTransportService buildService( protected MockTransportService buildService( String name, - Version version, + VersionInformation version, TransportVersion transportVersion, @Nullable ClusterSettings clusterSettings, Settings settings, @@ -254,13 +264,18 @@ protected MockTransportService buildService( ); } - protected MockTransportService buildService(String name, Version version, TransportVersion transportVersion, Settings settings) { + protected MockTransportService buildService( + String name, + VersionInformation version, + TransportVersion transportVersion, + Settings settings + ) { return buildService(name, version, transportVersion, null, settings); } protected MockTransportService buildService( String name, - Version version, + VersionInformation version, 
TransportVersion transportVersion, ClusterSettings clusterSettings, Settings settings @@ -2205,7 +2220,11 @@ public void testHandshakeWithIncompatVersion() { try ( MockTransportService service = buildService( "TS_C", - Version.CURRENT.minimumCompatibilityVersion(), + new VersionInformation( + Version.CURRENT.minimumCompatibilityVersion(), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ), transportVersion, Settings.EMPTY ) @@ -2235,13 +2254,24 @@ public void testHandshakeUpdatesVersion() throws IOException { try ( MockTransportService service = buildService( "TS_C", - Version.CURRENT.minimumCompatibilityVersion(), + new VersionInformation( + Version.CURRENT.minimumCompatibilityVersion(), + IndexVersion.MINIMUM_COMPATIBLE, + IndexVersion.current() + ), transportVersion, Settings.EMPTY ) ) { TransportAddress address = service.boundAddress().publishAddress(); - DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", address, emptyMap(), emptySet(), Version.fromString("2.0.0")); + DiscoveryNode node = new DiscoveryNode( + "TS_TPC", + "TS_TPC", + address, + emptyMap(), + emptySet(), + VersionInformation.inferVersions(Version.fromString("2.0.0")) + ); ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); builder.addConnections( 1, @@ -2264,7 +2294,7 @@ public void testKeepAlivePings() throws Exception { ConnectionProfile defaultProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY); ConnectionProfile connectionProfile = new ConnectionProfile.Builder(defaultProfile).setPingInterval(TimeValue.timeValueMillis(50)) .build(); - try (TransportService service = buildService("TS_TPC", Version.CURRENT, TransportVersion.current(), Settings.EMPTY)) { + try (TransportService service = buildService("TS_TPC", VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY)) { PlainActionFuture future = PlainActionFuture.newFuture(); DiscoveryNode node = new DiscoveryNode( "TS_TPC", @@ -2285,7 +2315,7 @@ public void 
testKeepAlivePings() throws Exception { public void testTcpHandshake() { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY); - try (TransportService service = buildService("TS_TPC", Version.CURRENT, TransportVersion.current(), Settings.EMPTY)) { + try (TransportService service = buildService("TS_TPC", VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY)) { DiscoveryNode node = new DiscoveryNode( "TS_TPC", "TS_TPC", @@ -2971,7 +3001,7 @@ public void testBindUnavailableAddress() { .build(); BindTransportException bindTransportException = expectThrows( BindTransportException.class, - () -> buildService("test", Version.CURRENT, TransportVersion.current(), settings) + () -> buildService("test", VersionInformation.CURRENT, TransportVersion.current(), settings) ); InetSocketAddress inetSocketAddress = serviceA.boundAddress().publishAddress().address(); assertEquals("Failed to bind to " + NetworkAddress.format(inetSocketAddress), bindTransportException.getMessage()); diff --git a/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc index 698528715b40..2e7034caaae8 100644 --- a/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/bulk-update-api-keys.asciidoc @@ -55,7 +55,7 @@ You can assign new privileges by specifying them in this parameter. To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. -The structure of a role descriptor is the same as the request for the <>. 
+The structure of a role descriptor is the same as the request for the <>. `metadata`:: (Optional, object) Arbitrary, nested metadata to associate with the API keys. @@ -82,7 +82,7 @@ POST /_security/api_key "role_descriptors": { "role-a": { "cluster": ["all"], - "index": [ + "indices": [ { "names": ["index-a*"], "privileges": ["read"] @@ -156,7 +156,7 @@ Further, assume that the owner user's permissions are: -------------------------------------------------- { "cluster": ["all"], - "index": [ + "indices": [ { "names": ["*"], "privileges": ["all"] @@ -178,7 +178,7 @@ POST /_security/api_key/_bulk_update ], "role_descriptors": { "role-a": { - "index": [ + "indices": [ { "names": ["*"], "privileges": ["write"] @@ -215,7 +215,7 @@ Both API keys' effective permissions after the update will be the intersection o [source,js] -------------------------------------------------- { - "index": [ + "indices": [ { "names": ["*"], "privileges": ["write"] @@ -259,7 +259,7 @@ The API keys' effective permissions after the update will be the same as the own -------------------------------------------------- { "cluster": ["all"], - "index": [ + "indices": [ { "names": ["*"], "privileges": ["all"] @@ -275,7 +275,7 @@ For the next example, assume that the owner user's permissions have changed from -------------------------------------------------- { "cluster": ["manage_security"], - "index": [ + "indices": [ { "names": ["*"], "privileges": ["read"] @@ -318,7 +318,7 @@ Resulting in the following effective permissions for both API keys: -------------------------------------------------- { "cluster": ["manage_security"], - "index": [ + "indices": [ { "names": ["*"], "privileges": ["read"] diff --git a/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc index 0373bda87227..f740e9413e3e 100644 --- a/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-api-keys.asciidoc @@ 
-56,8 +56,6 @@ then the API key will have a _point in time snapshot of permissions of the authenticated user_. If you supply role descriptors then the resultant permissions would be an intersection of API keys permissions and authenticated user's permissions thereby limiting the access scope for API keys. -The structure of role descriptor is the same as the request for create role API. -For more details, see <>. + -- NOTE: Due to the way in which this permission intersection is calculated, it is not @@ -67,6 +65,49 @@ role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call {es} APIs. -- ++ +`applications`::: (list) A list of application privilege entries. +`application` (required):::: (string) The name of the application to which this entry applies +`privileges` (required):::: (list) A list of strings, where each element is the name of an application +privilege or action. +`resources` (required):::: (list) A list resources to which the privileges are applied. + +`cluster`::: (list) A list of cluster privileges. These privileges define the +cluster level actions that API keys are able to execute. + +`global`::: (object) An object defining global privileges. A global privilege is +a form of cluster privilege that is request-aware. Support for global privileges +is currently limited to the management of application privileges. +This field is optional. + +`indices`::: (list) A list of indices permissions entries. +`field_security`:::: (object) The document fields that the API keys have +read access to. For more information, see +<>. +`names` (required):::: (list) A list of indices (or index name patterns) to which the +permissions in this entry apply. +`privileges`(required):::: (list) The index level privileges that the API keys +have on the specified indices. +`query`:::: A search query that defines the documents the API keys have +read access to. 
A document within the specified indices must match this query in +order for it to be accessible by the API keys. + +`metadata`::: (object) Optional meta-data. Within the `metadata` object, keys +that begin with `_` are reserved for system usage. + +`restriction`::: (object) Optional restriction for when the role descriptor is allowed to be effective. For more information, see +<>. +`workflows`:::: (list) A list of workflows to which the API key is restricted. +For a full list see <>. ++ +-- +NOTE: In order to use role restriction, an API key must be created with a *single role descriptor*. +-- ++ + +`run_as`::: (list) A list of users that the API keys can impersonate. +For more information, see +<>. `expiration`:: (Optional, string) Expiration time for the API key. By default, API keys never @@ -92,7 +133,7 @@ POST /_security/api_key "role_descriptors": { <2> "role-a": { "cluster": ["all"], - "index": [ + "indices": [ { "names": ["index-a*"], "privileges": ["read"] @@ -101,7 +142,7 @@ POST /_security/api_key }, "role-b": { "cluster": ["all"], - "index": [ + "indices": [ { "names": ["index-b*"], "privileges": ["all"] @@ -170,3 +211,29 @@ echo -n "VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw" | base64 <1> ---- <1> Use `-n` so that the `echo` command doesn't print the trailing newline character + +//tag::create-api-key-with-role-restriction-example[] +The following example creates an API key with a <> to the `search_application_query` workflow, +which allows to call only <>: + +[source,console] +---- +POST /_security/api_key +{ + "name": "my-restricted-api-key", + "role_descriptors": { + "my-restricted-role-descriptor": { + "indices": [ + { + "names": ["my-search-app"], + "privileges": ["read"] + } + ], + "restriction": { + "workflows": ["search_application_query"] + } + } + } +} +---- +//end::create-api-key-with-role-restriction-example[] diff --git a/x-pack/docs/en/rest-api/security/create-roles.asciidoc b/x-pack/docs/en/rest-api/security/create-roles.asciidoc index 
8a7b164773c4..be874aca5c7e 100644 --- a/x-pack/docs/en/rest-api/security/create-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-roles.asciidoc @@ -140,4 +140,4 @@ POST /_security/role/cli_or_drivers_minimal ] } -------------------------------------------------- -// end::sql-queries-permission[] \ No newline at end of file +// end::sql-queries-permission[] diff --git a/x-pack/docs/en/rest-api/security/grant-api-keys.asciidoc b/x-pack/docs/en/rest-api/security/grant-api-keys.asciidoc index 3d38f82cb95d..ad16f602d32c 100644 --- a/x-pack/docs/en/rest-api/security/grant-api-keys.asciidoc +++ b/x-pack/docs/en/rest-api/security/grant-api-keys.asciidoc @@ -75,8 +75,7 @@ key. This parameter is optional. When it is not specified or is an empty array, the API key has a point in time snapshot of permissions of the specified user or access token. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the permissions of the user or access -token. The structure of role descriptor is the same as the request for create -role API. For more details, see <>. +token. The structure of a role descriptor is the same as the request for <>. `metadata`::: (Optional, object) Arbitrary metadata that you want to associate with the API key. 
@@ -128,7 +127,7 @@ POST /_security/api_key/grant "role_descriptors": { "role-a": { "cluster": ["all"], - "index": [ + "indices": [ { "names": ["index-a*"], "privileges": ["read"] @@ -137,7 +136,7 @@ POST /_security/api_key/grant }, "role-b": { "cluster": ["all"], - "index": [ + "indices": [ { "names": ["index-b*"], "privileges": ["all"] diff --git a/x-pack/docs/en/rest-api/security/update-api-key.asciidoc b/x-pack/docs/en/rest-api/security/update-api-key.asciidoc index 8fc71c7b8257..0e18aba30362 100644 --- a/x-pack/docs/en/rest-api/security/update-api-key.asciidoc +++ b/x-pack/docs/en/rest-api/security/update-api-key.asciidoc @@ -59,7 +59,7 @@ You can assign new privileges by specifying them in this parameter. To remove assigned privileges, you can supply an empty `role_descriptors` parameter, i.e., an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. -The structure of a role descriptor is the same as the request for the <>. +The structure of a role descriptor is the same as the request for the <>. `metadata`:: (Optional, object) Arbitrary metadata that you want to associate with the API key. 
@@ -87,7 +87,7 @@ POST /_security/api_key "role_descriptors": { "role-a": { "cluster": ["all"], - "index": [ + "indices": [ { "names": ["index-a*"], "privileges": ["read"] @@ -129,7 +129,7 @@ For the examples below, assume that the owner user's permissions are: -------------------------------------------------- { "cluster": ["all"], - "index": [ + "indices": [ { "names": ["*"], "privileges": ["all"] @@ -147,7 +147,7 @@ PUT /_security/api_key/VuaCfGcBCdbkQm-e5aOx { "role_descriptors": { "role-a": { - "index": [ + "indices": [ { "names": ["*"], "privileges": ["write"] @@ -181,7 +181,7 @@ The API key's effective permissions after the update will be the intersection of [source,js] -------------------------------------------------- { - "index": [ + "indices": [ { "names": ["*"], "privileges": ["write"] @@ -217,7 +217,7 @@ The API key's effective permissions after the update will be the same as the own -------------------------------------------------- { "cluster": ["all"], - "index": [ + "indices": [ { "names": ["*"], "privileges": ["all"] @@ -233,7 +233,7 @@ For the next example, assume that the owner user's permissions have changed from -------------------------------------------------- { "cluster": ["manage_security"], - "index": [ + "indices": [ { "names": ["*"], "privileges": ["read"] @@ -266,7 +266,7 @@ Resulting in the following effective permissions for the API key: -------------------------------------------------- { "cluster": ["manage_security"], - "index": [ + "indices": [ { "names": ["*"], "privileges": ["read"] diff --git a/x-pack/docs/en/security/authentication/jwt-realm.asciidoc b/x-pack/docs/en/security/authentication/jwt-realm.asciidoc index 3d84d4aa04d5..bb3934a2d7b9 100644 --- a/x-pack/docs/en/security/authentication/jwt-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/jwt-realm.asciidoc @@ -534,8 +534,8 @@ the `jwt_role1` role that you mapped to this user in the JWT realm: ---- If you want to specify a request as the `run_as` user, 
include the -the `es-security-runas-user` header with the name of the user that you -want to submit requests as. The following request uses the `user123_runas` user: +`es-security-runas-user` header with the name of the user that you want to +submit requests as. The following request uses the `user123_runas` user: [source,sh] ---- diff --git a/x-pack/docs/en/security/authorization/overview.asciidoc b/x-pack/docs/en/security/authorization/overview.asciidoc index fb5300049b38..9e31d45dbd5b 100644 --- a/x-pack/docs/en/security/authorization/overview.asciidoc +++ b/x-pack/docs/en/security/authorization/overview.asciidoc @@ -78,6 +78,8 @@ include::built-in-roles.asciidoc[] include::managing-roles.asciidoc[] +include::role-restriction.asciidoc[] + include::privileges.asciidoc[] include::document-level-security.asciidoc[] diff --git a/x-pack/docs/en/security/authorization/role-restriction.asciidoc b/x-pack/docs/en/security/authorization/role-restriction.asciidoc new file mode 100644 index 000000000000..58943a28352f --- /dev/null +++ b/x-pack/docs/en/security/authorization/role-restriction.asciidoc @@ -0,0 +1,31 @@ +[role="xpack"] +[[role-restriction]] +=== Role restriction + +Role restriction can be used to specify conditions under which a role should be effective. +When conditions are not met, the role will be disabled, which will result in access being denied. +Not specifying restriction means the role is not restricted and thus always effective. +This is the default behaviour. + +-- +NOTE: Currently, the role restriction is only supported for <>, +with limitation that the API key can only have a single role descriptor. +-- + +[[workflows-restriction]] +==== Workflows + +Workflows allow to restrict the role to be effective exclusively when calling certain REST APIs. +Calling a REST API that is not allowed by a workflow, will result in the role being disabled. 
+The below section lists workflows that you can restrict the role to: + +`search_application_query`::: This workflow restricts the role to the <> only. + +-- +NOTE: Workflow names are case-sensitive. +-- + +[discrete] +==== Examples + +include::../../rest-api/security/create-api-keys.asciidoc[tag=create-api-key-with-role-restriction-example] diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java index da91cdd10148..dbefc3989ce4 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java @@ -17,6 +17,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; @@ -187,7 +188,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { return List.of(usage); } diff --git a/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java b/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java index cc94e4817079..ffc2062b237f 100644 --- a/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java +++ b/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import 
org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SystemIndexPlugin; @@ -73,7 +74,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { List components = new ArrayList<>(); if (DiscoveryNode.canContainData(environment.settings())) { diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java index d1071a11e9ed..155c167c57d1 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.license.License; import org.elasticsearch.license.LicensedFeature; import org.elasticsearch.plugins.ActionPlugin; @@ -119,7 +120,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { this.clusterServiceHolder.set(clusterService); this.allocationServiceHolder.set(allocationService); diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderServiceTests.java 
b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderServiceTests.java index 279c20e977a3..4db1ca27a132 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderServiceTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.InternalSnapshotsInfoService; @@ -417,7 +418,7 @@ public void testSizeOfSnapshot() { RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot(randomAlphaOfLength(5), new SnapshotId(randomAlphaOfLength(5), UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId(randomAlphaOfLength(5), UUIDs.randomBase64UUID()) ); IndexMetadata indexMetadata = IndexMetadata.builder(randomAlphaOfLength(5)) diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 8019a73cec8a..f19a6ba24027 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -224,6 +224,7 @@ testClusters.configureEach { extraConfigFile serviceTokens.name, serviceTokens requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") + requiresFeature 'es.inference_rescorer_feature_flag_enabled', Version.fromString("8.10.0") } tasks.register('enforceApiSpecsConvention').configure { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index ec504ce6c96d..f7457eea5bef 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTasksExecutor; @@ -185,7 +186,8 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { this.client = client; if (enabled == false) { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index f1a8ccc6eec2..2a278aa71253 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -45,6 +45,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; import org.elasticsearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException; @@ -197,7 +198,7 @@ public void getSnapshotInfo(GetSnapshotInfoContext context) { List.copyOf(indicesMap.keySet()), List.copyOf(responseMetadata.dataStreams().keySet()), List.of(), - response.getNodes().getMaxNodeVersion(), + response.getNodes().getMaxNodeVersion().indexVersion, SnapshotState.SUCCESS ); })) 
@@ -276,7 +277,7 @@ public void getRepositoryData(ActionListener listener) { final long nowMillis = threadPool.absoluteTimeInMillis(); snapshotsDetails.put( indexName, - new RepositoryData.SnapshotDetails(SnapshotState.SUCCESS, Version.CURRENT, nowMillis, nowMillis, "") + new RepositoryData.SnapshotDetails(SnapshotState.SUCCESS, IndexVersion.current(), nowMillis, nowMillis, "") ); indexSnapshots.put(new IndexId(indexName, remoteIndices.get(indexName).getIndex().getUUID()), List.of(snapshotId)); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 601ed4f9df87..40c3a35d6608 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -8,7 +8,6 @@ import org.apache.lucene.store.IOContext; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -37,6 +36,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineTestCase; @@ -516,7 +516,7 @@ protected synchronized void recoverPrimary(IndexShard primaryShard) { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ) ); diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/allocation/CcrPrimaryFollowerAllocationDeciderTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/allocation/CcrPrimaryFollowerAllocationDeciderTests.java index b0cd6844a2d2..d27d1478518b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/allocation/CcrPrimaryFollowerAllocationDeciderTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/allocation/CcrPrimaryFollowerAllocationDeciderTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; @@ -199,7 +200,7 @@ static RecoverySource.SnapshotRecoverySource newSnapshotRecoverySource() { return new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java index 4561bfe42612..2156995f2e0f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.ccr.index.engine; import org.apache.lucene.store.IOContext; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionTestUtils; @@ 
-23,6 +22,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Releasable; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; @@ -134,7 +134,7 @@ public void testRestoreShard() throws IOException { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, - Version.CURRENT, + IndexVersion.current(), new IndexId("test", UUIDs.randomBase64UUID(random())) ) ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index fb28b5915f4a..bca6a5ed8649 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -43,6 +43,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.mapper.MetadataFieldMapper; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.license.ClusterStateLicenseService; import org.elasticsearch.license.License; @@ -318,7 +319,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { List components = new ArrayList<>(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleAction.java index 9613772a1607..d8a3ea1d7568 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java index 22805ae8b6ca..3e8be842db38 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleTask.java index b5d46cf6bf56..fdd5662c29b9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleTask.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.downsample; +import 
org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.rollup.RollupField; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java index 502966566344..abd2b88a9826 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.ilm; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexAbstraction; @@ -21,7 +22,6 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.io.IOException; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java index c1307f53f575..4930098705ac 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleStep.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; @@ 
-18,7 +19,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xpack.core.downsample.DownsampleAction; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import java.util.Objects; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlLTRNamedXContentProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlLTRNamedXContentProvider.java new file mode 100644 index 000000000000..9c2b9522ac15 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/MlLTRNamedXContentProvider.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.ml.inference; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.plugins.spi.NamedXContentProvider; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigUpdate; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LenientlyParsedInferenceConfig; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.StrictlyParsedInferenceConfig; + +import java.util.ArrayList; +import java.util.List; + +/** + * Only the LTR named writeables and xcontent. 
Remove and combine with inference provider + * when feature flag is removed + */ +public class MlLTRNamedXContentProvider implements NamedXContentProvider { + + @Override + public List getNamedXContentParsers() { + List namedXContent = new ArrayList<>(); + // Lenient Inference Config + namedXContent.add( + new NamedXContentRegistry.Entry( + LenientlyParsedInferenceConfig.class, + LearnToRankConfig.NAME, + LearnToRankConfig::fromXContentLenient + ) + ); + // Strict Inference Config + namedXContent.add( + new NamedXContentRegistry.Entry( + StrictlyParsedInferenceConfig.class, + LearnToRankConfig.NAME, + LearnToRankConfig::fromXContentStrict + ) + ); + // Inference Config Update + namedXContent.add( + new NamedXContentRegistry.Entry( + InferenceConfigUpdate.class, + LearnToRankConfigUpdate.NAME, + LearnToRankConfigUpdate::fromXContentStrict + ) + ); + return namedXContent; + } + + public List getNamedWriteables() { + List namedWriteables = new ArrayList<>(); + // Inference config + namedWriteables.add( + new NamedWriteableRegistry.Entry(InferenceConfig.class, LearnToRankConfig.NAME.getPreferredName(), LearnToRankConfig::new) + ); + // Inference config update + namedWriteables.add( + new NamedWriteableRegistry.Entry( + InferenceConfigUpdate.class, + LearnToRankConfigUpdate.NAME.getPreferredName(), + LearnToRankConfigUpdate::new + ) + ); + return namedWriteables; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java index 00dfcee3e1b5..8f2d19d85b67 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/InferenceConfig.java @@ -40,4 +40,16 @@ default boolean requestingImportance() { String getResultsField(); boolean 
isAllocateOnly(); + + default boolean supportsIngestPipeline() { + return true; + } + + default boolean supportsPipelineAggregation() { + return true; + } + + default boolean supportsSearchRescorer() { + return false; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java new file mode 100644 index 000000000000..48eb001fb378 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; +import org.elasticsearch.xpack.core.ml.utils.NamedXContentObjectHelper; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +public class LearnToRankConfig extends RegressionConfig { + + public static final ParseField NAME = new ParseField("learn_to_rank"); + static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersion.current(); + public static final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = 
new ParseField("num_top_feature_importance_values"); + public static final ParseField FEATURE_EXTRACTORS = new ParseField("feature_extractors"); + public static LearnToRankConfig EMPTY_PARAMS = new LearnToRankConfig(null, null); + + private static final ObjectParser LENIENT_PARSER = createParser(true); + private static final ObjectParser STRICT_PARSER = createParser(false); + + private static ObjectParser createParser(boolean lenient) { + ObjectParser parser = new ObjectParser<>( + NAME.getPreferredName(), + lenient, + LearnToRankConfig.Builder::new + ); + parser.declareInt(Builder::setNumTopFeatureImportanceValues, NUM_TOP_FEATURE_IMPORTANCE_VALUES); + parser.declareNamedObjects( + Builder::setLearnToRankFeatureExtractorBuilders, + (p, c, n) -> p.namedObject(LearnToRankFeatureExtractorBuilder.class, n, lenient), + b -> {}, + FEATURE_EXTRACTORS + ); + return parser; + } + + public static LearnToRankConfig fromXContentStrict(XContentParser parser) { + return STRICT_PARSER.apply(parser, null).build(); + } + + public static LearnToRankConfig fromXContentLenient(XContentParser parser) { + return LENIENT_PARSER.apply(parser, null).build(); + } + + private final List featureExtractorBuilders; + + public LearnToRankConfig(Integer numTopFeatureImportanceValues, List featureExtractorBuilders) { + super(DEFAULT_RESULTS_FIELD, numTopFeatureImportanceValues); + if (featureExtractorBuilders != null) { + Set featureNames = featureExtractorBuilders.stream() + .map(LearnToRankFeatureExtractorBuilder::featureName) + .collect(Collectors.toSet()); + if (featureNames.size() < featureExtractorBuilders.size()) { + throw new IllegalArgumentException( + "[" + FEATURE_EXTRACTORS.getPreferredName() + "] contains duplicate [feature_name] values" + ); + } + } + this.featureExtractorBuilders = featureExtractorBuilders == null ? 
List.of() : featureExtractorBuilders; + } + + public LearnToRankConfig(StreamInput in) throws IOException { + super(in); + this.featureExtractorBuilders = in.readNamedWriteableList(LearnToRankFeatureExtractorBuilder.class); + } + + public List getFeatureExtractorBuilders() { + return featureExtractorBuilders; + } + + @Override + public String getResultsField() { + return DEFAULT_RESULTS_FIELD; + } + + @Override + public boolean isAllocateOnly() { + return false; + } + + @Override + public boolean supportsIngestPipeline() { + return false; + } + + @Override + public boolean supportsPipelineAggregation() { + return false; + } + + @Override + public boolean supportsSearchRescorer() { + return true; + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeNamedWriteableList(featureExtractorBuilders); + } + + @Override + public String getName() { + return NAME.getPreferredName(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName(), getNumTopFeatureImportanceValues()); + if (featureExtractorBuilders.isEmpty() == false) { + NamedXContentObjectHelper.writeNamedObjects( + builder, + params, + true, + FEATURE_EXTRACTORS.getPreferredName(), + featureExtractorBuilders + ); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; + LearnToRankConfig that = (LearnToRankConfig) o; + return Objects.equals(featureExtractorBuilders, that.featureExtractorBuilders); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), featureExtractorBuilders); + } + + @Override + public boolean 
isTargetTypeSupported(TargetType targetType) { + return TargetType.REGRESSION.equals(targetType); + } + + @Override + public Version getMinimalSupportedNodeVersion() { + return Version.CURRENT; + } + + @Override + public TransportVersion getMinimalSupportedTransportVersion() { + return MIN_SUPPORTED_TRANSPORT_VERSION; + } + + public static class Builder { + private Integer numTopFeatureImportanceValues; + private List learnToRankFeatureExtractorBuilders; + + Builder() {} + + Builder(LearnToRankConfig config) { + this.numTopFeatureImportanceValues = config.getNumTopFeatureImportanceValues(); + this.learnToRankFeatureExtractorBuilders = config.featureExtractorBuilders; + } + + public Builder setNumTopFeatureImportanceValues(Integer numTopFeatureImportanceValues) { + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; + return this; + } + + public Builder setLearnToRankFeatureExtractorBuilders( + List learnToRankFeatureExtractorBuilders + ) { + this.learnToRankFeatureExtractorBuilders = learnToRankFeatureExtractorBuilders; + return this; + } + + public LearnToRankConfig build() { + return new LearnToRankConfig(numTopFeatureImportanceValues, learnToRankFeatureExtractorBuilders); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java new file mode 100644 index 000000000000..8030b31f396a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java @@ -0,0 +1,228 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.NamedXContentObject; +import org.elasticsearch.xpack.core.ml.utils.NamedXContentObjectHelper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig.FEATURE_EXTRACTORS; +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfig.NUM_TOP_FEATURE_IMPORTANCE_VALUES; + +public class LearnToRankConfigUpdate implements InferenceConfigUpdate, NamedXContentObject { + + public static final ParseField NAME = LearnToRankConfig.NAME; + + public static LearnToRankConfigUpdate EMPTY_PARAMS = new LearnToRankConfigUpdate(null, null); + + public static LearnToRankConfigUpdate fromConfig(LearnToRankConfig config) { + return new LearnToRankConfigUpdate(config.getNumTopFeatureImportanceValues(), config.getFeatureExtractorBuilders()); + } + + private static final ObjectParser STRICT_PARSER = createParser(false); + + private static ObjectParser createParser(boolean lenient) { + ObjectParser parser = new ObjectParser<>( + NAME.getPreferredName(), + lenient, + LearnToRankConfigUpdate.Builder::new + ); 
+ parser.declareInt(LearnToRankConfigUpdate.Builder::setNumTopFeatureImportanceValues, NUM_TOP_FEATURE_IMPORTANCE_VALUES); + parser.declareNamedObjects( + LearnToRankConfigUpdate.Builder::setFeatureExtractorBuilders, + (p, c, n) -> p.namedObject(LearnToRankFeatureExtractorBuilder.class, n, false), + b -> {}, + FEATURE_EXTRACTORS + ); + return parser; + } + + public static LearnToRankConfigUpdate fromXContentStrict(XContentParser parser) { + return STRICT_PARSER.apply(parser, null).build(); + } + + private final Integer numTopFeatureImportanceValues; + private final List featureExtractorBuilderList; + + public LearnToRankConfigUpdate( + Integer numTopFeatureImportanceValues, + List featureExtractorBuilders + ) { + if (numTopFeatureImportanceValues != null && numTopFeatureImportanceValues < 0) { + throw new IllegalArgumentException( + "[" + NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName() + "] must be greater than or equal to 0" + ); + } + if (featureExtractorBuilders != null) { + Set featureNames = featureExtractorBuilders.stream() + .map(LearnToRankFeatureExtractorBuilder::featureName) + .collect(Collectors.toSet()); + if (featureNames.size() < featureExtractorBuilders.size()) { + throw new IllegalArgumentException( + "[" + FEATURE_EXTRACTORS.getPreferredName() + "] contains duplicate [feature_name] values" + ); + } + } + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; + this.featureExtractorBuilderList = featureExtractorBuilders == null ? 
List.of() : featureExtractorBuilders; + } + + public LearnToRankConfigUpdate(StreamInput in) throws IOException { + this.numTopFeatureImportanceValues = in.readOptionalVInt(); + this.featureExtractorBuilderList = in.readNamedWriteableList(LearnToRankFeatureExtractorBuilder.class); + } + + public Integer getNumTopFeatureImportanceValues() { + return numTopFeatureImportanceValues; + } + + @Override + public String getResultsField() { + return DEFAULT_RESULTS_FIELD; + } + + @Override + public InferenceConfigUpdate.Builder, ? extends InferenceConfigUpdate> newBuilder() { + return new Builder().setNumTopFeatureImportanceValues(numTopFeatureImportanceValues); + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalVInt(numTopFeatureImportanceValues); + out.writeNamedWriteableList(featureExtractorBuilderList); + } + + @Override + public String getName() { + return NAME.getPreferredName(); + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return LearnToRankConfig.MIN_SUPPORTED_TRANSPORT_VERSION; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (numTopFeatureImportanceValues != null) { + builder.field(NUM_TOP_FEATURE_IMPORTANCE_VALUES.getPreferredName(), numTopFeatureImportanceValues); + } + if (featureExtractorBuilderList.isEmpty() == false) { + NamedXContentObjectHelper.writeNamedObjects( + builder, + params, + true, + FEATURE_EXTRACTORS.getPreferredName(), + featureExtractorBuilderList + ); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + LearnToRankConfigUpdate that = (LearnToRankConfigUpdate) o; + return Objects.equals(this.numTopFeatureImportanceValues, 
that.numTopFeatureImportanceValues) + && Objects.equals(this.featureExtractorBuilderList, that.featureExtractorBuilderList); + } + + @Override + public int hashCode() { + return Objects.hash(numTopFeatureImportanceValues, featureExtractorBuilderList); + } + + @Override + public LearnToRankConfig apply(InferenceConfig originalConfig) { + if (originalConfig instanceof LearnToRankConfig == false) { + throw ExceptionsHelper.badRequestException( + "Inference config of type [{}] can not be updated with a inference request of type [{}]", + originalConfig.getName(), + getName() + ); + } + + LearnToRankConfig ltrConfig = (LearnToRankConfig) originalConfig; + if (isNoop(ltrConfig)) { + return ltrConfig; + } + LearnToRankConfig.Builder builder = new LearnToRankConfig.Builder(ltrConfig); + if (numTopFeatureImportanceValues != null) { + builder.setNumTopFeatureImportanceValues(numTopFeatureImportanceValues); + } + if (featureExtractorBuilderList.isEmpty() == false) { + Map existingExtractors = ltrConfig.getFeatureExtractorBuilders() + .stream() + .collect(Collectors.toMap(LearnToRankFeatureExtractorBuilder::featureName, f -> f)); + featureExtractorBuilderList.forEach(f -> existingExtractors.put(f.featureName(), f)); + builder.setLearnToRankFeatureExtractorBuilders(new ArrayList<>(existingExtractors.values())); + } + return builder.build(); + } + + @Override + public boolean isSupported(InferenceConfig inferenceConfig) { + return inferenceConfig instanceof LearnToRankConfig; + } + + boolean isNoop(LearnToRankConfig originalConfig) { + return (numTopFeatureImportanceValues == null || originalConfig.getNumTopFeatureImportanceValues() == numTopFeatureImportanceValues) + && (featureExtractorBuilderList.isEmpty() + || Objects.equals(originalConfig.getFeatureExtractorBuilders(), featureExtractorBuilderList)); + } + + public static class Builder implements InferenceConfigUpdate.Builder { + private Integer numTopFeatureImportanceValues; + private List featureExtractorBuilderList; + + 
@Override + public Builder setResultsField(String resultsField) { + assert false : "results field should never be set in ltr config"; + return this; + } + + public Builder setNumTopFeatureImportanceValues(Integer numTopFeatureImportanceValues) { + this.numTopFeatureImportanceValues = numTopFeatureImportanceValues; + return this; + } + + public Builder setFeatureExtractorBuilders(List featureExtractorBuilderList) { + this.featureExtractorBuilderList = featureExtractorBuilderList; + return this; + } + + @Override + public LearnToRankConfigUpdate build() { + return new LearnToRankConfigUpdate(numTopFeatureImportanceValues, featureExtractorBuilderList); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfig.java index c39d6103b772..fe67fa52d860 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfig.java @@ -29,4 +29,19 @@ public interface NlpConfig extends LenientlyParsedInferenceConfig, StrictlyParse * @return the model tokenization parameters */ Tokenization getTokenization(); + + @Override + default boolean supportsIngestPipeline() { + return true; + } + + @Override + default boolean supportsPipelineAggregation() { + return false; + } + + @Override + default boolean supportsSearchRescorer() { + return false; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ltr/LearnToRankFeatureExtractorBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ltr/LearnToRankFeatureExtractorBuilder.java new file mode 100644 index 000000000000..590ff7a7422a --- /dev/null +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ltr/LearnToRankFeatureExtractorBuilder.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr; + +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xpack.core.ml.utils.NamedXContentObject; + +public interface LearnToRankFeatureExtractorBuilder extends NamedXContentObject, NamedWriteable { + + ParseField FEATURE_NAME = new ParseField("feature_name"); + + /** + * @return The input feature that this extractor satisfies + */ + String featureName(); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupShardTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupShardTask.java index ccf80afd0ae4..49a32388845b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupShardTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupShardTask.java @@ -7,10 +7,10 @@ package org.elasticsearch.xpack.core.rollup.action; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import org.elasticsearch.xpack.core.rollup.RollupField; import java.util.Map; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java index e9b4a27ea741..127a84b103ec 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java @@ -6,33 +6,79 @@ */ package org.elasticsearch.xpack.core.security.action.user; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.security.authc.Authentication; import java.io.IOException; +import java.util.Objects; -public class AuthenticateResponse extends ActionResponse { +public class AuthenticateResponse extends ActionResponse implements ToXContent { - private Authentication authentication; + public static final TransportVersion VERSION_OPERATOR_FIELD = TransportVersion.V_8_500_028; + + private final Authentication authentication; + private final boolean operator; public AuthenticateResponse(StreamInput in) throws IOException { super(in); authentication = new Authentication(in); + if (in.getTransportVersion().onOrAfter(VERSION_OPERATOR_FIELD)) { + operator = in.readBoolean(); + } else { + operator = false; + } } - public AuthenticateResponse(Authentication authentication) { - this.authentication = authentication; + public AuthenticateResponse(Authentication authentication, boolean operator) { + this.authentication = Objects.requireNonNull(authentication); + this.operator = operator; } public Authentication authentication() { return authentication; } + public boolean isOperator() { + return operator; + } + @Override public void writeTo(StreamOutput out) throws IOException { authentication.writeTo(out); + 
if (out.getTransportVersion().onOrAfter(VERSION_OPERATOR_FIELD)) { + out.writeBoolean(operator); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + authentication.toXContentFragment(builder); + if (this.operator) { + builder.field("operator", true); + } + return builder.endObject(); } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AuthenticateResponse that = (AuthenticateResponse) o; + return this.operator == that.operator && this.authentication.equals(that.authentication); + } + + @Override + public int hashCode() { + return Objects.hash(authentication, operator); + } } diff --git a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/ilm-policy/profiling-60-days.json b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/ilm-policy/profiling-60-days.json index 5d2cad8f951d..130b2fc59a5b 100644 --- a/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/ilm-policy/profiling-60-days.json +++ b/x-pack/plugin/core/src/main/resources/org/elasticsearch/xpack/profiler/ilm-policy/profiling-60-days.json @@ -18,9 +18,6 @@ "actions": { "set_priority": { "priority": 50 - }, - "forcemerge": { - "max_num_segments": 1 } } }, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java index adaddaeb4700..58133ecd1aa9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.env.Environment; import 
org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineException; @@ -358,7 +359,7 @@ public void onFailure(Exception e) { new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("src_only", snapshotId), - Version.CURRENT, + IndexVersion.current(), indexId ) ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index f33cce809736..6e7383ea314f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.core; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -51,9 +50,11 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.mapper.MetadataFieldMapper; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -204,7 +205,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + 
IndicesService indicesService ) { List components = new ArrayList<>(); components.addAll( @@ -221,7 +223,8 @@ public Collection createComponents( expressionResolver, repositoriesServiceSupplier, tracer, - allocationService + allocationService, + indicesService ) ); @@ -241,7 +244,8 @@ public Collection createComponents( expressionResolver, repositoriesServiceSupplier, tracer, - allocationService + allocationService, + indicesService ) ) ); @@ -590,8 +594,8 @@ public Map getInternalRepositories( } @Override - public BiConsumer addPreRestoreVersionCheck() { - List> checks = filterPlugins(RepositoryPlugin.class).stream() + public BiConsumer addPreRestoreVersionCheck() { + List> checks = filterPlugins(RepositoryPlugin.class).stream() .map(RepositoryPlugin::addPreRestoreVersionCheck) .filter(Objects::nonNull) .collect(Collectors.toList()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java index 2966c9d35d88..23633138d570 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/XPackPluginTests.java @@ -160,6 +160,7 @@ public List loadExtensions(Class extensionPointType) { null, null, null, + null, null ); assertEquals(license, XPackPlugin.getSharedLicenseService().getLicense()); @@ -213,6 +214,7 @@ public List loadExtensions(Class extensionPointType) { null, null, null, + null, null ); assertThat(XPackPlugin.getSharedLicenseService(), instanceOf(ClusterStateLicenseService.class)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/SourceDestValidatorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/SourceDestValidatorTests.java index 5f9365b09e8b..564abc6f80c6 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/SourceDestValidatorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/validation/SourceDestValidatorTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; @@ -103,7 +104,7 @@ public class SourceDestValidatorTests extends ESTestCase { private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); private final TransportService transportService = MockTransportService.createNewService( Settings.EMPTY, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStepTests.java index 54f90fc84e32..81c1ad55d5d1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SetSingleNodeAllocateStepTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -29,6 +30,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.Index; import 
org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.node.Node; import org.elasticsearch.test.VersionUtils; @@ -417,7 +419,7 @@ public void testPerformActionSomeShardsOnlyOnNewNodes() throws Exception { new TransportAddress(TransportAddress.META_ADDRESS, nodePort), Node.NODE_ATTRIBUTES.getAsMap(nodeSettings), DiscoveryNode.getRolesFromSettings(nodeSettings), - oldVersion + new VersionInformation(oldVersion, IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()) ) ); } @@ -489,7 +491,7 @@ public void testPerformActionSomeShardsOnlyOnNewNodesButNewNodesInvalidAttrs() { new TransportAddress(TransportAddress.META_ADDRESS, nodePort), Node.NODE_ATTRIBUTES.getAsMap(nodeSettings), DiscoveryNode.getRolesFromSettings(nodeSettings), - oldVersion + new VersionInformation(oldVersion, IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()) ) ); } @@ -559,7 +561,7 @@ public void testPerformActionNewShardsExistButWithInvalidAttributes() throws Exc new TransportAddress(TransportAddress.META_ADDRESS, nodePort), Node.NODE_ATTRIBUTES.getAsMap(nodeSettings), DiscoveryNode.getRolesFromSettings(nodeSettings), - oldVersion + new VersionInformation(oldVersion, IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()) ) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java index d18b17791ef2..ffa933455136 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/InferenceConfigItemTestCase.java @@ -74,14 +74,16 @@ static InferenceConfig mutateForVersion(NlpConfig inferenceConfig, TransportVers protected NamedXContentRegistry xContentRegistry() { List 
namedXContent = new ArrayList<>(); namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers()); namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); return new NamedXContentRegistry(namedXContent); } @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { - List entries = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables()); - return new NamedWriteableRegistry(entries); + List namedWriteables = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables()); + namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables()); + return new NamedWriteableRegistry(namedWriteables); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigTests.java new file mode 100644 index 000000000000..75923354eaa0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigTests.java @@ -0,0 +1,203 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.inference.InferenceConfigItemTestCase; +import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.MlLTRNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +public class LearnToRankConfigTests extends InferenceConfigItemTestCase { + private boolean lenient; + + public static LearnToRankConfig randomLearnToRankConfig() { + return new LearnToRankConfig( + randomBoolean() ? null : randomIntBetween(0, 10), + randomBoolean() + ? 
null + : Stream.generate(() -> new TestValueExtractor(randomAlphaOfLength(10))).limit(randomInt(5)).collect(Collectors.toList()) + ); + } + + @Before + public void chooseStrictOrLenient() { + lenient = randomBoolean(); + } + + @Override + protected LearnToRankConfig createTestInstance() { + return randomLearnToRankConfig(); + } + + @Override + protected LearnToRankConfig mutateInstance(LearnToRankConfig instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> field.isEmpty() == false; + } + + @Override + protected Writeable.Reader instanceReader() { + return LearnToRankConfig::new; + } + + @Override + protected LearnToRankConfig doParseInstance(XContentParser parser) throws IOException { + return lenient ? LearnToRankConfig.fromXContentLenient(parser) : LearnToRankConfig.fromXContentStrict(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return lenient; + } + + @Override + protected LearnToRankConfig mutateInstanceForVersion(LearnToRankConfig instance, TransportVersion version) { + return instance; + } + + public void testDuplicateFeatureNames() { + List featureExtractorBuilderList = List.of( + new TestValueExtractor("foo"), + new TestValueExtractor("foo") + ); + expectThrows( + IllegalArgumentException.class, + () -> new LearnToRankConfig(randomBoolean() ? 
null : randomIntBetween(0, 10), featureExtractorBuilderList) + ); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List namedXContent = new ArrayList<>(); + namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); + namedXContent.add( + new NamedXContentRegistry.Entry( + LearnToRankFeatureExtractorBuilder.class, + TestValueExtractor.NAME, + TestValueExtractor::fromXContent + ) + ); + return new NamedXContentRegistry(namedXContent); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + List namedWriteables = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables()); + namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables()); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + LearnToRankFeatureExtractorBuilder.class, + TestValueExtractor.NAME.getPreferredName(), + TestValueExtractor::new + ) + ); + return new NamedWriteableRegistry(namedWriteables); + } + + static class TestValueExtractor implements LearnToRankFeatureExtractorBuilder { + public static final ParseField NAME = new ParseField("test"); + private final String featureName; + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME.getPreferredName(), + a -> new TestValueExtractor((String) a[0]) + ); + private static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( + NAME.getPreferredName(), + true, + a -> new TestValueExtractor((String) a[0]) + ); + static { + PARSER.declareString(constructorArg(), FEATURE_NAME); + LENIENT_PARSER.declareString(constructorArg(), FEATURE_NAME); + } + + public static TestValueExtractor fromXContent(XContentParser parser, Object context) { + boolean lenient = Boolean.TRUE.equals(context); 
+ return lenient ? LENIENT_PARSER.apply(parser, null) : PARSER.apply(parser, null); + } + + TestValueExtractor(StreamInput in) throws IOException { + this.featureName = in.readString(); + } + + TestValueExtractor(String featureName) { + this.featureName = featureName; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(FEATURE_NAME.getPreferredName(), featureName); + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(featureName); + } + + @Override + public String featureName() { + return featureName; + } + + @Override + public String getName() { + return NAME.getPreferredName(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TestValueExtractor that = (TestValueExtractor) o; + return Objects.equals(featureName, that.featureName); + } + + @Override + public int hashCode() { + return Objects.hash(featureName); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdateTests.java new file mode 100644 index 000000000000..97c0358209fa --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdateTests.java @@ -0,0 +1,120 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.core.ml.inference.trainedmodel; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; +import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.MlLTRNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ltr.LearnToRankFeatureExtractorBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigTests.randomLearnToRankConfig; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.in; +import static org.hamcrest.Matchers.is; + +public class LearnToRankConfigUpdateTests extends AbstractBWCSerializationTestCase { + + public static LearnToRankConfigUpdate randomLearnToRankConfigUpdate() { + return new LearnToRankConfigUpdate(randomBoolean() ? 
null : randomIntBetween(0, 10), null); + } + + public void testApply() { + LearnToRankConfig originalConfig = randomLearnToRankConfig(); + assertThat(originalConfig, equalTo(LearnToRankConfigUpdate.EMPTY_PARAMS.apply(originalConfig))); + assertThat( + new LearnToRankConfig.Builder(originalConfig).setNumTopFeatureImportanceValues(5).build(), + equalTo(new LearnToRankConfigUpdate.Builder().setNumTopFeatureImportanceValues(5).build().apply(originalConfig)) + ); + assertThat( + new LearnToRankConfig.Builder(originalConfig).setNumTopFeatureImportanceValues(1).build(), + equalTo(new LearnToRankConfigUpdate.Builder().setNumTopFeatureImportanceValues(1).build().apply(originalConfig)) + ); + + LearnToRankFeatureExtractorBuilder extractorBuilder = new LearnToRankConfigTests.TestValueExtractor("foo"); + LearnToRankFeatureExtractorBuilder extractorBuilder2 = new LearnToRankConfigTests.TestValueExtractor("bar"); + + LearnToRankConfig config = new LearnToRankConfigUpdate.Builder().setNumTopFeatureImportanceValues(1) + .setFeatureExtractorBuilders(List.of(extractorBuilder2, extractorBuilder)) + .build() + .apply(originalConfig); + assertThat(config.getNumTopFeatureImportanceValues(), equalTo(1)); + assertThat(extractorBuilder2, is(in(config.getFeatureExtractorBuilders()))); + assertThat(extractorBuilder, is(in(config.getFeatureExtractorBuilders()))); + } + + @Override + protected LearnToRankConfigUpdate createTestInstance() { + return randomLearnToRankConfigUpdate(); + } + + @Override + protected LearnToRankConfigUpdate mutateInstance(LearnToRankConfigUpdate instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + + @Override + protected Writeable.Reader instanceReader() { + return LearnToRankConfigUpdate::new; + } + + @Override + protected LearnToRankConfigUpdate doParseInstance(XContentParser parser) throws IOException { + return LearnToRankConfigUpdate.fromXContentStrict(parser); + } + + @Override + protected 
LearnToRankConfigUpdate mutateInstanceForVersion(LearnToRankConfigUpdate instance, TransportVersion version) { + return instance; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List namedXContent = new ArrayList<>(); + namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents()); + namedXContent.add( + new NamedXContentRegistry.Entry( + LearnToRankFeatureExtractorBuilder.class, + LearnToRankConfigTests.TestValueExtractor.NAME, + LearnToRankConfigTests.TestValueExtractor::fromXContent + ) + ); + return new NamedXContentRegistry(namedXContent); + } + + @Override + protected NamedWriteableRegistry writableRegistry() { + List namedWriteables = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables()); + namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables()); + namedWriteables.add( + new NamedWriteableRegistry.Entry( + LearnToRankFeatureExtractorBuilder.class, + LearnToRankConfigTests.TestValueExtractor.NAME.getPreferredName(), + LearnToRankConfigTests.TestValueExtractor::new + ) + ); + return new NamedWriteableRegistry(namedWriteables); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return writableRegistry(); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/DownsampleActionConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/DownsampleActionConfigTests.java index 67f081bf7bf0..0f0ed022b3ca 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/DownsampleActionConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/DownsampleActionConfigTests.java @@ -6,11 +6,11 @@ */ package org.elasticsearch.xpack.core.rollup; +import 
org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import java.io.IOException; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponseTests.java new file mode 100644 index 000000000000..a211a4cd4762 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponseTests.java @@ -0,0 +1,39 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; + +import java.io.IOException; + +public class AuthenticateResponseTests extends AbstractWireSerializingTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return AuthenticateResponse::new; + } + + @Override + protected AuthenticateResponse createTestInstance() { + return new AuthenticateResponse(AuthenticationTestHelper.builder().build(), randomBoolean()); + } + + @Override + protected AuthenticateResponse mutateInstance(AuthenticateResponse instance) throws IOException { + if (randomBoolean()) { + return new AuthenticateResponse( + randomValueOtherThanMany(instance::equals, () -> AuthenticationTestHelper.builder().build()), + instance.isOperator() + ); + } else { + return new AuthenticateResponse(instance.authentication(), instance.isOperator() == false); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java index 656534d6f524..3cbcfe6c074c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java @@ -52,14 +52,7 @@ public class RestTermsEnumActionTests extends ESTestCase { private static NodeClient client = new NodeClient(Settings.EMPTY, threadPool); private static UsageService usageService = new UsageService(); - private static RestController controller = new RestController( - null, - client, - new NoneCircuitBreakerService(), - usageService, - Tracer.NOOP, - false - ); + private static RestController controller = 
new RestController(null, client, new NoneCircuitBreakerService(), usageService, Tracer.NOOP); private static RestTermsEnumAction action = new RestTermsEnumAction(); /** diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java deleted file mode 100644 index 76cf7dc6f2aa..000000000000 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecks.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.deprecation; - -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.indices.ShardLimitValidator; -import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; - -import java.util.Locale; - -import static org.elasticsearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; - -public class ClusterDeprecationChecks { - /** - * Upgrading can require the addition of one or more small indices. This method checks that based on configuration we have the room - * to add a small number of additional shards to the cluster. The goal is to prevent a failure during upgrade. 
- * @param clusterState The cluster state, used to get settings and information about nodes - * @return A deprecation issue if there is not enough room in this cluster to add a few more shards, or null otherwise - */ - static DeprecationIssue checkShards(ClusterState clusterState) { - // Make sure we have room to add a small non-frozen index if needed - final int shardsInFutureNewSmallIndex = 5; - final int replicasForFutureIndex = 1; - final int maxConfiguredShardsPerNode = SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(clusterState.getMetadata().settings()); - ShardLimitValidator.Result shardLimitsResult = ShardLimitValidator.checkShardLimitForNormalNodes( - maxConfiguredShardsPerNode, - shardsInFutureNewSmallIndex, - replicasForFutureIndex, - clusterState - ); - if (shardLimitsResult.canAddShards()) { - return null; - } else { - return new DeprecationIssue( - DeprecationIssue.Level.WARNING, - "The cluster has too many shards to be able to upgrade", - "https://ela.st/es-deprecation-8-shard-limit", - String.format( - Locale.ROOT, - "Upgrading requires adding a small number of new shards. There is not enough room for %d more " - + "shards. 
Increase the cluster.max_shards_per_node setting, or remove indices " - + "to clear up resources.", - shardLimitsResult.totalShardsToAdd() - ), - false, - null - ); - } - } -} diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java index b042915f755f..e094ce30e2f8 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -100,7 +101,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { final DeprecationIndexingTemplateRegistry templateRegistry = new DeprecationIndexingTemplateRegistry( environment.settings(), diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index 86de7c6ae40c..4329cc65f262 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -14,8 +14,6 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; -import 
java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.Objects; import java.util.function.Function; @@ -35,9 +33,7 @@ public class DeprecationChecks { private DeprecationChecks() {} - static List> CLUSTER_SETTINGS_CHECKS = Collections.unmodifiableList( - Arrays.asList(ClusterDeprecationChecks::checkShards) - ); + static List> CLUSTER_SETTINGS_CHECKS = List.of(); static final List< NodeDeprecationCheck> NODE_SETTINGS_CHECKS = List diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index 0aebc349c417..05463ef5e604 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -6,13 +6,13 @@ */ package org.elasticsearch.xpack.deprecation; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.LegacyFormatNames; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.frozen.FrozenEngine; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; @@ -32,8 +32,8 @@ public class IndexDeprecationChecks { static DeprecationIssue oldIndicesCheck(IndexMetadata indexMetadata) { // TODO: this check needs to be revised. It's trivially true right now. 
- Version currentCompatibilityVersion = indexMetadata.getCompatibilityVersion(); - if (currentCompatibilityVersion.before(Version.V_7_0_0)) { + IndexVersion currentCompatibilityVersion = indexMetadata.getCompatibilityVersion(); + if (currentCompatibilityVersion.before(IndexVersion.V_7_0_0)) { return new DeprecationIssue( DeprecationIssue.Level.CRITICAL, "Old index with a compatibility version < 7.0", diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java deleted file mode 100644 index cd38862f8a8f..000000000000 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/ClusterDeprecationChecksTests.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.deprecation; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.DataTier; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.ShardLimitValidator; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; - -import java.util.List; -import java.util.UUID; - -import static java.util.Collections.singletonList; -import static org.elasticsearch.xpack.deprecation.DeprecationChecks.CLUSTER_SETTINGS_CHECKS; -import static org.hamcrest.Matchers.equalTo; - -public class ClusterDeprecationChecksTests extends ESTestCase { - public void testCheckShards() { - /* - * This test sets the number of allowed shards per node to 5 and creates 2 nodes. So we have room for 10 shards, which is the - * number of shards that checkShards() is making sure we can add. The first time there are no indices, so the check passes. The - * next time there is an index with one shard and one replica, leaving room for 8 shards. So the check fails. 
- */ - final ClusterState state = ClusterState.builder(new ClusterName(randomAlphaOfLength(5))) - .metadata( - Metadata.builder() - .persistentSettings(Settings.builder().put(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 5).build()) - .build() - ) - .nodes( - DiscoveryNodes.builder() - .add(DiscoveryNodeUtils.create(UUID.randomUUID().toString())) - .add(DiscoveryNodeUtils.create(UUID.randomUUID().toString())) - ) - .build(); - List issues = DeprecationChecks.filterChecks(CLUSTER_SETTINGS_CHECKS, c -> c.apply(state)); - assertThat(0, equalTo(issues.size())); - - final ClusterState stateWithProblems = ClusterState.builder(new ClusterName(randomAlphaOfLength(5))) - .metadata( - Metadata.builder() - .persistentSettings(Settings.builder().put(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 4).build()) - .put( - IndexMetadata.builder(randomAlphaOfLength(10)) - .settings(settings(Version.CURRENT).put(DataTier.TIER_PREFERENCE_SETTING.getKey(), " ")) - .numberOfShards(1) - .numberOfReplicas(1) - .build(), - false - ) - .build() - ) - .nodes( - DiscoveryNodes.builder() - .add(DiscoveryNodeUtils.create(UUID.randomUUID().toString())) - .add(DiscoveryNodeUtils.create(UUID.randomUUID().toString())) - ) - .build(); - - issues = DeprecationChecks.filterChecks(CLUSTER_SETTINGS_CHECKS, c -> c.apply(stateWithProblems)); - - DeprecationIssue expected = new DeprecationIssue( - DeprecationIssue.Level.WARNING, - "The cluster has too many shards to be able to upgrade", - "https://ela.st/es-deprecation-8-shard-limit", - "Upgrading requires adding a small number of new shards. There is not enough room for 10 more shards. 
Increase the cluster" - + ".max_shards_per_node setting, or remove indices to clear up resources.", - false, - null - ); - assertEquals(singletonList(expected), issues); - } -} diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 3d5f0dcecc71..95eee71d1911 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.frozen.FrozenEngine; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; @@ -27,7 +28,7 @@ public class IndexDeprecationChecksTests extends ESTestCase { public void testOldIndicesCheck() { - Version createdWith = Version.fromString("1.0.0"); + IndexVersion createdWith = IndexVersion.fromId(1000099); IndexMetadata indexMetadata = IndexMetadata.builder("test") .settings(settings(createdWith)) .numberOfShards(1) diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java index eb34445758cc..9630dc0547d8 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java @@ -24,6 +24,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import 
org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.ingest.Processor; import org.elasticsearch.license.XPackLicenseState; @@ -201,7 +202,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { EnrichPolicyLocks enrichPolicyLocks = new EnrichPolicyLocks(); EnrichPolicyExecutor enrichPolicyExecutor = new EnrichPolicyExecutor( diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/30_search_application_get.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/30_search_application_get.yml index cb4d7968ed34..c5343a5d3900 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/30_search_application_get.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/30_search_application_get.yml @@ -114,3 +114,69 @@ teardown: search_application.get: name: test-nonexistent-search-application +--- +"Get Search Application - Warning returned when indices and aliases are inconsistent": + - skip: + features: warnings + + - do: + indices.create: + index: test-index3 + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + indices.update_aliases: + body: + actions: + - add: + index: test-index1 + alias: test-search-application-1 + - add: + index: test-index2 + alias: test-search-application-1 + - add: + index: test-index3 + alias: test-search-application-1 + + - do: + indices.delete: + index: test-index2 + ignore: 404 + + - do: + warnings: + - "test-index2 index is in search application but not in associated alias" + - "test-index3 index is in alias but not associcated with search application" + search_application.get: + name: test-search-application-1 + + - match: 
{ name: "test-search-application-1" } + - match: { indices: [ "test-index1", "test-index2" ] } + - match: { analytics_collection_name: "test-analytics" } + - match: { + template: { + script: { + source: { + query: { + query_string: { + query: "{{query_string}}" + } + } + }, + lang: "mustache", + options: { + content_type: "application/json;charset=utf-8" + } + }, + dictionary: { + query_string: { + type: string + } + } + } + } + - gte: { updated_at_millis: 0 } diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/55_search_application_search.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/55_search_application_search.yml index 919d01879b1e..08acaf01477a 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/55_search_application_search.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/55_search_application_search.yml @@ -257,3 +257,47 @@ teardown: params: field_name: field3 field_value: value3 + +--- +"Query Search Application - Warning returned when indices and aliases are inconsistent": + - skip: + features: ["warnings", "headers"] + + - do: + indices.create: + index: test-index3 + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + indices.update_aliases: + body: + actions: + - add: + index: test-search-index1 + alias: test-search-application-1 + - add: + index: test-search-index2 + alias: test-search-application-1 + - add: + index: test-index3 + alias: test-search-application-1 + + - do: + indices.delete: + index: test-search-index2 + ignore: 404 + + - do: + warnings: + - "test-search-index2 index is in search application but not in associated alias" + - "test-index3 index is in alias but not associated with search application" + headers: { Authorization: "Basic ZW50c2VhcmNoLXVzZXI6ZW50c2VhcmNoLXVzZXItcGFzc3dvcmQ=" } # user + 
search_application.search: + name: test-search-application-1 + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc1" } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java index a9551a112aba..8abee617478a 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.util.FeatureFlag; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.logging.LogManager; @@ -211,7 +212,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { if (enabled == false) { return Collections.emptyList(); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java index 0a401236fdd9..01a95f9b0037 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java @@ -73,6 +73,7 @@ import java.util.Arrays; import java.util.Base64; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; 
import java.util.Map; @@ -94,6 +95,8 @@ public class SearchApplicationIndexService { public static final String SEARCH_APPLICATION_ALIAS_NAME = ".search-app"; public static final String SEARCH_APPLICATION_CONCRETE_INDEX_NAME = ".search-app-1"; public static final String SEARCH_APPLICATION_INDEX_NAME_PATTERN = ".search-app-*"; + private static final String INCONSISTENT_INDICES_NOT_IN_ALIAS_MSG = "index is in search application but not in associated alias"; + private static final String INCONSISTENT_ALIAS_NOT_IN_INDICES_MSG = "index is in alias but not associated with search application"; // The client to perform any operations on user indices (alias, ...). private final Client client; @@ -208,6 +211,42 @@ public void getSearchApplication(String resourceName, ActionListener> listener) { + final Metadata metadata = clusterService.state().metadata(); + final String searchAliasName = getSearchAliasName(app); + final Map inconsistentIndices = new HashMap<>(); + + if (metadata.hasAlias(searchAliasName)) { + Set indicesInAlias = metadata.aliasedIndices(searchAliasName).stream().map(Index::getName).collect(Collectors.toSet()); + Set configuredIndices = Set.of(app.indices()); + + Set indicesInAliasButNotInSearchApp = findMissingIndices(indicesInAlias, configuredIndices); + indicesInAliasButNotInSearchApp.forEach(index -> inconsistentIndices.put(index, INCONSISTENT_ALIAS_NOT_IN_INDICES_MSG)); + + Set indicesInSearchAppButNotInAlias = findMissingIndices(configuredIndices, indicesInAlias); + indicesInSearchAppButNotInAlias.forEach(index -> inconsistentIndices.put(index, INCONSISTENT_INDICES_NOT_IN_ALIAS_MSG)); + } else { + for (String index : app.indices()) { + inconsistentIndices.put(index, INCONSISTENT_INDICES_NOT_IN_ALIAS_MSG); + } + } + + listener.onResponse(inconsistentIndices); + } + + private Set findMissingIndices(Set indices, Set toExclude) { + Set diff = new HashSet<>(indices); + diff.removeAll(toExclude); + return diff; + } + private static String 
getSearchAliasName(SearchApplication app) { return app.name(); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportGetSearchApplicationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportGetSearchApplicationAction.java index 2eca3ebf1ccd..a99070cb34f0 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportGetSearchApplicationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportGetSearchApplicationAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -44,6 +45,13 @@ protected void doExecute( GetSearchApplicationAction.Request request, ActionListener listener ) { - systemIndexService.getSearchApplication(request.getName(), listener.map(GetSearchApplicationAction.Response::new)); + systemIndexService.getSearchApplication(request.getName(), listener.delegateFailure((l, searchApplication) -> { + systemIndexService.checkAliasConsistency(searchApplication, listener.delegateFailure((l2, inconsistentIndices) -> { + for (String key : inconsistentIndices.keySet()) { + HeaderWarning.addWarning(key + " " + inconsistentIndices.get(key)); + } + listener.onResponse(new GetSearchApplicationAction.Response(searchApplication)); + })); + })); } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportQuerySearchApplicationAction.java 
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportQuerySearchApplicationAction.java index 706ac5c6b832..5cf94f3f0c5f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportQuerySearchApplicationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportQuerySearchApplicationAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -60,11 +61,16 @@ protected void doExecute(Task task, SearchApplicationSearchRequest request, Acti SearchSourceBuilder sourceBuilder = templateService.renderQuery(searchApplication, request.queryParams()); SearchRequest searchRequest = new SearchRequest(searchApplication.name()).source(sourceBuilder); - client.execute( - SearchAction.INSTANCE, - searchRequest, - listener.delegateFailure((l2, searchResponse) -> l2.onResponse(searchResponse)) - ); + systemIndexService.checkAliasConsistency(searchApplication, listener.delegateFailure((l2, inconsistentIndices) -> { + for (String key : inconsistentIndices.keySet()) { + HeaderWarning.addWarning(key + " " + inconsistentIndices.get(key)); + } + client.execute( + SearchAction.INSTANCE, + searchRequest, + listener.delegateFailure((l3, searchResponse) -> l3.onResponse(searchResponse)) + ); + })); } catch (Exception e) { l.onFailure(e); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java index 8097447df048..f7417f31943e 100644 --- 
a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -83,7 +84,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { return createComponents(client, environment.settings(), clusterService); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/analysis/CancellationTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/analysis/CancellationTests.java index c9011eabfcb2..eba52ff91c2a 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/analysis/CancellationTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/analysis/CancellationTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.eql.analysis; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; @@ -18,6 +17,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.breaker.NoopCircuitBreaker; @@ -69,7 +69,12 @@ public class CancellationTests extends ESTestCase { public void mockTransportService() { threadPool = new TestThreadPool(getClass().getName()); // The TransportService needs to be able to return a valid RemoteClusterServices object down the stream, required by the Verifier. - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, TransportVersion.current(), threadPool); + transportService = MockTransportService.createNewService( + Settings.EMPTY, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool + ); } @After diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java index 1f3fa6b503f1..5adea642ed16 100644 --- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java +++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java @@ -34,6 +34,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.indices.ExecutorNames; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemDataStreamDescriptor; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndexDescriptor.Type; @@ -95,7 +96,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { FleetTemplateRegistry registry = new FleetTemplateRegistry( environment.settings(), diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java index 28d76d3a2562..d5cea8ac5caa 
100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; @@ -98,7 +99,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { settings = environment.settings(); enabled = ENABLED_SETTING.get(settings); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/UpdateSettingsStepTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/UpdateSettingsStepTests.java index 39753fefd00d..b084826bc01c 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/UpdateSettingsStepTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/UpdateSettingsStepTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; @@ -74,7 +75,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + 
IndicesService indicesService ) { return List.of(service); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java index 1e70f3201c9d..37761b76d374 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java @@ -28,6 +28,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.health.HealthIndicatorService; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.HealthPlugin; @@ -213,7 +214,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { final List components = new ArrayList<>(); ILMHistoryTemplateRegistry ilmTemplateRegistry = new ILMHistoryTemplateRegistry( diff --git a/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldTypeTests.java b/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldTypeTests.java index af80b8ea9ff2..c32e7e583c78 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldTypeTests.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldTypeTests.java @@ -147,7 +147,7 @@ public double execute(ExplanationHolder explanation) { } }; } - }, searchExecutionContext.lookup(), 7f, "test", 0, IndexVersion.CURRENT)), 
equalTo(2)); + }, searchExecutionContext.lookup(), 7f, "test", 0, IndexVersion.current())), equalTo(2)); } } } diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java index 367915c1169c..6c36608f05fc 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java @@ -92,13 +92,13 @@ protected void masterOperation(Task task, Request request, ClusterState state, A final long relativeStartNanos = System.nanoTime(); logAndWriteNotificationAtInfo(modelId, "starting model upload"); - URI uri = new URI(repository).resolve(packagedModelId + ModelLoaderUtils.MODEL_FILE_EXTENSION); + URI uri = ModelLoaderUtils.resolvePackageLocation(repository, packagedModelId + ModelLoaderUtils.MODEL_FILE_EXTENSION); // Uploading other artefacts of the model first, that way the model is last and a simple search can be used to check if the // download is complete if (Strings.isNullOrEmpty(modelPackageConfig.getVocabularyFile()) == false) { Tuple, List> vocabularyAndMerges = ModelLoaderUtils.loadVocabulary( - new URI(repository).resolve(modelPackageConfig.getVocabularyFile()) + ModelLoaderUtils.resolvePackageLocation(repository, modelPackageConfig.getVocabularyFile()) ); PutTrainedModelVocabularyAction.Request r2 = new PutTrainedModelVocabularyAction.Request( diff --git a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle index 72793d1f9a41..3268c15879b9 100644 --- a/x-pack/plugin/ml/qa/basic-multi-node/build.gradle +++ b/x-pack/plugin/ml/qa/basic-multi-node/build.gradle @@ -1,3 +1,4 @@ +import 
org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.legacy-java-rest-test' @@ -12,6 +13,7 @@ testClusters.configureEach { setting 'xpack.license.self_generated.type', 'trial' setting 'indices.lifecycle.history_index_enabled', 'false' setting 'slm.history_index_enabled', 'false' + requiresFeature 'es.inference_rescorer_feature_flag_enabled', Version.fromString("8.10.0") } if (BuildParams.inFipsJvm){ diff --git a/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlRescorerIT.java b/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlRescorerIT.java new file mode 100644 index 000000000000..671889b207c7 --- /dev/null +++ b/x-pack/plugin/ml/qa/basic-multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlRescorerIT.java @@ -0,0 +1,273 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; + +public class MlRescorerIT extends ESRestTestCase { + + private static final String MODEL_ID = "basic-ltr-model"; + private static final String INDEX_NAME = "store"; + + @Before + public void setupModelAndData() throws IOException { + putRegressionModel(MODEL_ID, """ + { + "description": "super complex model for tests", + "input": {"field_names": ["cost", "product"]}, + "inference_config": { + "learn_to_rank": { + } + }, + "definition": { + "preprocessors" : [{ + "one_hot_encoding": { + "field": "product", + "hot_map": { + "TV": "type_tv", + "VCR": "type_vcr", + "Laptop": "type_laptop" + } + } + }], + "trained_model": { + "ensemble": { + "feature_names": ["cost", "type_tv", "type_vcr", "type_laptop"], + "target_type": "regression", + "trained_models": [ + { + "tree": { + "feature_names": [ + "cost" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 400, + "decision_type": "lte", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 5.0 + }, + { + "node_index": 2, + "leaf_value": 2.0 + } + ], + "target_type": "regression" + } + }, + { + "tree": { + "feature_names": [ + "type_tv" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 1, + "decision_type": "lt", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + 
"leaf_value": 1.0 + }, + { + "node_index": 2, + "leaf_value": 12.0 + } + ], + "target_type": "regression" + } + } + ] + } + } + } + }"""); + createIndex(INDEX_NAME, Settings.builder().put("number_of_shards", randomIntBetween(1, 3)).build(), """ + "properties":{ + "product":{"type": "keyword"}, + "cost":{"type": "integer"}}"""); + indexData("{ \"product\": \"TV\", \"cost\": 300 }"); + indexData("{ \"product\": \"TV\", \"cost\": 400 }"); + indexData("{ \"product\": \"VCR\", \"cost\": 150 }"); + indexData("{ \"product\": \"VCR\", \"cost\": 180 }"); + indexData("{ \"product\": \"Laptop\", \"cost\": 15000 }"); + refreshAllIndices(); + } + + @SuppressWarnings("unchecked") + public void testLtrSimple() throws Exception { + Response searchResponse = search(""" + { + "query": { + "match": { "product": { "query": "TV"}} + }, + "rescore": { + "window_size": 10, + "inference": { + "model_id": "basic-ltr-model" + } + } + + }"""); + + Map response = responseAsMap(searchResponse); + assertThat((List) XContentMapValues.extractValue("hits.hits._score", response), contains(17.0, 17.0)); + } + + @SuppressWarnings("unchecked") + public void testLtrSimpleDFS() throws Exception { + Response searchResponse = searchDfs(""" + { + "query": { + "match": { "product": { "query": "TV"}} + }, + "rescore": { + "window_size": 10, + "inference": { + "model_id": "basic-ltr-model" + } + } + + }"""); + + Map response = responseAsMap(searchResponse); + assertThat(response.toString(), (List) XContentMapValues.extractValue("hits.hits._score", response), contains(17.0, 17.0)); + } + + @SuppressWarnings("unchecked") + public void testLtrSimpleEmpty() throws Exception { + Response searchResponse = search(""" + { "query": { + "term": { "product": "computer"} + }, + "rescore": { + "window_size": 10, + "inference": { + "model_id": "basic-ltr-model" + } + } + + }"""); + + Map response = responseAsMap(searchResponse); + assertThat((List) XContentMapValues.extractValue("hits.hits._score", response), empty()); + } 
+ + @SuppressWarnings("unchecked") + public void testLtrEmptyDFS() throws Exception { + Response searchResponse = searchDfs(""" + { "query": { + "match": { "product": { "query": "computer"}} + }, + "rescore": { + "window_size": 10, + "inference": { + "model_id": "basic-ltr-model" + } + } + + }"""); + + Map response = responseAsMap(searchResponse); + assertThat(response.toString(), (List) XContentMapValues.extractValue("hits.hits._score", response), empty()); + } + + @SuppressWarnings("unchecked") + public void testLtrCanMatch() throws Exception { + Response searchResponse = searchCanMatch(""" + { "query": { + "match": { "product": { "query": "TV"}} + }, + "rescore": { + "window_size": 10, + "inference": { + "model_id": "basic-ltr-model" + } + } + + }""", false); + + Map response = responseAsMap(searchResponse); + assertThat(response.toString(), (List) XContentMapValues.extractValue("hits.hits._score", response), contains(17.0, 17.0)); + + searchResponse = searchCanMatch(""" + { "query": { + "match": { "product": { "query": "TV"}} + }, + "rescore": { + "window_size": 10, + "inference": { + "model_id": "basic-ltr-model" + } + } + + }""", true); + + response = responseAsMap(searchResponse); + assertThat(response.toString(), (List) XContentMapValues.extractValue("hits.hits._score", response), contains(17.0, 17.0)); + } + + private void indexData(String data) throws IOException { + Request request = new Request("POST", INDEX_NAME + "/_doc"); + request.setJsonEntity(data); + client().performRequest(request); + } + + private Response search(String searchBody) throws IOException { + Request request = new Request("POST", INDEX_NAME + "/_search?request_cache=false"); + request.setJsonEntity(searchBody); + return client().performRequest(request); + } + + private Response searchDfs(String searchBody) throws IOException { + Request request = new Request("POST", INDEX_NAME + "/_search?search_type=dfs_query_then_fetch&request_cache=false"); + request.setJsonEntity(searchBody); + 
return client().performRequest(request); + } + + private Response searchCanMatch(String searchBody, boolean dfs) throws IOException { + Request request = dfs + ? new Request("POST", INDEX_NAME + "/_search?search_type=dfs_query_then_fetch&request_cache=false&pre_filter_shard_size=1") + : new Request("POST", INDEX_NAME + "/_search?request_cache=false&pre_filter_shard_size=1"); + request.setJsonEntity(searchBody); + return client().performRequest(request); + } + + private void putRegressionModel(String modelId, String body) throws IOException { + Request model = new Request("PUT", "_ml/trained_models/" + modelId); + model.setJsonEntity(body); + assertThat(client().performRequest(model).getStatusLine().getStatusCode(), equalTo(200)); + } + +} diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index b49a71b34b7f..b28e6bec462b 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -1,3 +1,4 @@ +import org.elasticsearch.gradle.Version apply plugin: 'elasticsearch.legacy-yaml-rest-test' dependencies { @@ -180,6 +181,7 @@ tasks.named("yamlRestTest").configure { 'ml/inference_crud/Test put nlp model config with vocabulary set', 'ml/inference_crud/Test put model model aliases with nlp model', 'ml/inference_processor/Test create processor with missing mandatory fields', + 'ml/inference_rescore/Test rescore with missing model', 'ml/inference_stats_crud/Test get stats given missing trained model', 'ml/inference_stats_crud/Test get stats given expression without matches and allow_no_match is false', 'ml/jobs_crud/Test cannot create job with model snapshot id set', @@ -256,4 +258,5 @@ testClusters.configureEach { user username: "no_ml", password: "x-pack-test-password", role: "minimal" setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' + requiresFeature 'es.inference_rescorer_feature_flag_enabled', 
Version.fromString("8.10.0") } diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceRescorerIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceRescorerIT.java new file mode 100644 index 000000000000..1748d8a7f94a --- /dev/null +++ b/x-pack/plugin/ml/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/InferenceRescorerIT.java @@ -0,0 +1,194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class InferenceRescorerIT extends InferenceTestCase { + + private static final String MODEL_ID = "ltr-model"; + private static final String INDEX_NAME = "store"; + + @Before + public void setupModelAndData() throws IOException { + putRegressionModel(MODEL_ID, """ + { + "description": "super complex model for tests", + "input": {"field_names": ["cost", "product"]}, + "inference_config": { + "learn_to_rank": { + } + }, + "definition": { + "preprocessors" : [{ + "one_hot_encoding": { + "field": "product", + "hot_map": { + "TV": "type_tv", + "VCR": "type_vcr", + "Laptop": "type_laptop" + } + } + }], + "trained_model": { + "ensemble": { + "feature_names": ["cost", "type_tv", "type_vcr", "type_laptop"], + "target_type": "regression", + "trained_models": [ + { + "tree": { + "feature_names": [ + "cost" + ], + "tree_structure": [ 
+ { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 400, + "decision_type": "lte", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 5.0 + }, + { + "node_index": 2, + "leaf_value": 2.0 + } + ], + "target_type": "regression" + } + }, + { + "tree": { + "feature_names": [ + "type_tv" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 1, + "decision_type": "lt", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 1.0 + }, + { + "node_index": 2, + "leaf_value": 12.0 + } + ], + "target_type": "regression" + } + } + ] + } + } + } + }"""); + createIndex(INDEX_NAME, Settings.EMPTY, """ + "properties":{ + "product":{"type": "keyword"}, + "cost":{"type": "integer"}}"""); + indexData("{ \"product\": \"TV\", \"cost\": 300}"); + indexData("{ \"product\": \"TV\", \"cost\": 400}"); + indexData("{ \"product\": \"TV\", \"cost\": 600}"); + indexData("{ \"product\": \"VCR\", \"cost\": 15}"); + indexData("{ \"product\": \"VCR\", \"cost\": 350}"); + indexData("{ \"product\": \"VCR\", \"cost\": 580}"); + indexData("{ \"product\": \"Laptop\", \"cost\": 100}"); + indexData("{ \"product\": \"Laptop\", \"cost\": 300}"); + indexData("{ \"product\": \"Laptop\", \"cost\": 500}"); + adminClient().performRequest(new Request("POST", INDEX_NAME + "/_refresh")); + } + + public void testInferenceRescore() throws Exception { + Request request = new Request("GET", "store/_search?size=3"); + request.setJsonEntity(""" + { + "rescore": { + "window_size": 10, + "inference": { "model_id": "ltr-model" } + } + }"""); + assertHitScores(client().performRequest(request), List.of(17.0, 17.0, 14.0)); + request.setJsonEntity(""" + { + "query": {"term": {"product": "Laptop"}}, + "rescore": { + "window_size": 10, + "inference": { "model_id": "ltr-model" } + } + }"""); + assertHitScores(client().performRequest(request), 
List.of(6.0, 6.0, 3.0)); + } + + public void testInferenceRescoreSmallWindow() throws Exception { + Request request = new Request("GET", "store/_search?size=5"); + request.setJsonEntity(""" + { + "rescore": { + "window_size": 2, + "inference": { "model_id": "ltr-model" } + } + }"""); + assertHitScores(client().performRequest(request), List.of(17.0, 17.0, 1.0, 1.0, 1.0)); + } + + public void testInferenceRescorerWithChainedRescorers() throws IOException { + Request request = new Request("GET", "store/_search?size=5"); + request.setJsonEntity(""" + { + "rescore": [ + { + "window_size": 4, + "query": { "rescore_query":{ "script_score": {"query": {"match_all": {}}, "script": {"source": "return 4"}}}} + }, + { + "window_size": 3, + "inference": { "model_id": "ltr-model" } + }, + { + "window_size": 2, + "query": { "rescore_query": { "script_score": {"query": {"match_all": {}}, "script": {"source": "return 20"}}}} + } + ] + }"""); + assertHitScores(client().performRequest(request), List.of(37.0, 37.0, 14.0, 5.0, 1.0)); + } + + private void indexData(String data) throws IOException { + Request request = new Request("POST", INDEX_NAME + "/_doc"); + request.setJsonEntity(data); + client().performRequest(request); + } + + @SuppressWarnings("unchecked") + private static void assertHitScores(Response response, List expectedScores) throws IOException { + assertThat((List) XContentMapValues.extractValue("hits.hits._score", responseAsMap(response)), equalTo(expectedScores)); + } +} diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java index 39d7a2577aeb..44903314c3a3 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java +++ 
b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.indices.TestIndexNameExpressionResolver; @@ -102,7 +103,7 @@ public void testAutomaticModelUpdate() throws Exception { new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Collections.emptyMap(), Set.of(DiscoveryNodeRole.MASTER_ROLE), - Version.V_8_0_0 + VersionInformation.inferVersions(Version.V_8_0_0) ) ) .localNodeId("node_id") diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 19bca24ba488..935dd5ef2fc6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -50,6 +50,7 @@ import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.indices.AssociatedIndexDescriptor; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider; import org.elasticsearch.indices.breaker.BreakerSettings; @@ -189,6 +190,7 @@ import org.elasticsearch.xpack.core.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; import org.elasticsearch.xpack.core.ml.dataframe.stats.AnalysisStatsNamedWriteablesProvider; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import 
org.elasticsearch.xpack.core.ml.inference.MlLTRNamedXContentProvider; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -329,6 +331,8 @@ import org.elasticsearch.xpack.ml.inference.pytorch.process.BlackHolePyTorchProcess; import org.elasticsearch.xpack.ml.inference.pytorch.process.NativePyTorchProcessFactory; import org.elasticsearch.xpack.ml.inference.pytorch.process.PyTorchProcessFactory; +import org.elasticsearch.xpack.ml.inference.rescorer.InferenceRescorerBuilder; +import org.elasticsearch.xpack.ml.inference.rescorer.InferenceRescorerFeature; import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.JobManagerHolder; import org.elasticsearch.xpack.ml.job.NodeLoadDetector; @@ -846,6 +850,21 @@ private void reportClashingNodeAttribute(String attrName) { ); } + @Override + public List> getRescorers() { + if (enabled && InferenceRescorerFeature.isEnabled()) { + // Inference rescorer requires access to the model loading service + return List.of( + new RescorerSpec<>( + InferenceRescorerBuilder.NAME, + in -> new InferenceRescorerBuilder(in, modelLoadingService::get), + parser -> InferenceRescorerBuilder.fromXContent(parser, modelLoadingService::get) + ) + ); + } + return List.of(); + } + @Override public Collection createComponents( Client client, @@ -860,7 +879,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { if (enabled == false) { // special holder for @link(MachineLearningFeatureSetUsage) which needs access to job manager, empty if ML is disabled @@ -1736,6 +1756,10 @@ public List getNamedXContent() { ) ); namedXContent.addAll(new 
CorrelationNamedContentProvider().getNamedXContentParsers()); + // LTR Combine with Inference named content provider when feature flag is removed + if (InferenceRescorerFeature.isEnabled()) { + namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers()); + } return namedXContent; } @@ -1820,7 +1844,10 @@ public List getNamedWriteables() { namedWriteables.addAll(MlAutoscalingNamedWritableProvider.getNamedWriteables()); namedWriteables.addAll(new CorrelationNamedContentProvider().getNamedWriteables()); namedWriteables.addAll(new ChangePointNamedContentProvider().getNamedWriteables()); - + // LTR Combine with Inference named content provider when feature flag is removed + if (InferenceRescorerFeature.isEnabled()) { + namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables()); + } return namedWriteables; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java index 62e747d1d443..e9478bc2462d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java @@ -129,7 +129,7 @@ protected void taskOperation( DataFrameAnalyticsTask task, ActionListener> listener ) { - logger.debug("Get stats for running task [{}]", task.getParams().getId()); + logger.trace("Get stats for running task [{}]", task.getParams().getId()); ActionListener updateProgressListener = ActionListener.wrap(aVoid -> { StatsHolder statsHolder = task.getStatsHolder(); @@ -160,7 +160,7 @@ protected void doExecute( ActionListener listener ) { TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); - logger.debug("Get stats for data frame analytics [{}]", request.getId()); + 
logger.trace("Get stats for data frame analytics [{}]", request.getId()); ActionListener getResponseListener = ActionListener.wrap(getResponse -> { List expandedIds = getResponse.getResources() @@ -249,7 +249,7 @@ static List determineStoppedConfigs(List listener) { - logger.debug("[{}] Gathering stats for stopped task", config.getId()); + logger.trace("[{}] Gathering stats for stopped task", config.getId()); RetrievedStatsHolder retrievedStatsHolder = new RetrievedStatsHolder( ProgressTracker.fromZeroes(config.getAnalysis().getProgressPhases(), config.getAnalysis().supportsInference()).report() diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java index 544ce742521b..d327430a5bfa 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java @@ -64,7 +64,7 @@ public TransportGetDatafeedsStatsAction( @Override protected void doExecute(Task task, Request request, ActionListener listener) { - logger.debug(() -> "[" + request.getDatafeedId() + "] get stats for datafeed"); + logger.trace(() -> "[" + request.getDatafeedId() + "] get stats for datafeed"); ClusterState state = clusterService.state(); final PersistentTasksCustomMetadata tasksInProgress = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); final Response.Builder responseBuilder = new Response.Builder(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java index 038d5fa0b610..fa63f5f9d78c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java @@ -95,7 +95,7 @@ public TransportGetJobsStatsAction( @Override protected void doExecute(Task task, GetJobsStatsAction.Request request, ActionListener finalListener) { - logger.debug("Get stats for job [{}]", request.getJobId()); + logger.trace("Get stats for job [{}]", request.getJobId()); TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); ClusterState state = clusterService.state(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java index e416f981dda6..ec25edbf513a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java @@ -264,7 +264,7 @@ public InferencePipelineAggregationBuilder rewrite(QueryRewriteContext context) SetOnce loadedModel = new SetOnce<>(); BiConsumer> modelLoadAction = (client, listener) -> modelLoadingService.get() - .getModelForSearch(modelId, listener.delegateFailure((delegate, localModel) -> { + .getModelForAggregation(modelId, listener.delegateFailure((delegate, localModel) -> { loadedModel.set(localModel); boolean isLicensed = localModel.getLicenseLevel() == License.OperationMode.BASIC diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModel.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModel.java index 4cc5cfadcd49..4e64b3e2f8f2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModel.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModel.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.license.License; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.WarningInferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; @@ -22,6 +23,7 @@ import org.elasticsearch.xpack.ml.inference.TrainedModelStatsService; import java.io.Closeable; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -58,6 +60,7 @@ public class LocalModel implements Closeable { private final CircuitBreaker trainedModelCircuitBreaker; private final AtomicLong referenceCount; private final long cachedRamBytesUsed; + private final TrainedModelType trainedModelType; LocalModel( String modelId, @@ -67,6 +70,7 @@ public class LocalModel implements Closeable { Map defaultFieldMap, InferenceConfig modelInferenceConfig, License.OperationMode licenseLevel, + TrainedModelType trainedModelType, TrainedModelStatsService trainedModelStatsService, CircuitBreaker trainedModelCircuitBreaker ) { @@ -84,6 +88,7 @@ public class LocalModel implements Closeable { this.licenseLevel = licenseLevel; this.trainedModelCircuitBreaker = trainedModelCircuitBreaker; this.referenceCount = new AtomicLong(1); + this.trainedModelType = trainedModelType; } long ramBytesUsed() { @@ -93,6 +98,14 @@ long ramBytesUsed() { return cachedRamBytesUsed; } + public InferenceConfig getInferenceConfig() { + return inferenceConfig; + } + + TrainedModelType getTrainedModelType() { + return trainedModelType; + } + public String getModelId() { return modelId; } @@ -129,6 +142,10 @@ public InferenceResults inferNoStats(Map fields) { 
return trainedModelDefinition.infer(flattenedFields, inferenceConfig); } + public Collection inputFields() { + return fieldNames; + } + public void infer(Map fields, InferenceConfigUpdate update, ActionListener listener) { if (update.isSupported(this.inferenceConfig) == false) { listener.onFailure( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java index f893341cbec8..da2f97e283f2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java @@ -8,6 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; @@ -25,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.ingest.IngestMetadata; @@ -35,6 +37,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; @@ -50,12 +53,14 @@ import 
org.elasticsearch.xpack.ml.notifications.InferenceAuditor; import java.util.ArrayDeque; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Queue; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -109,11 +114,71 @@ public class ModelLoadingService implements ClusterStateListener { Setting.Property.NodeScope ); - // The feature requesting the model + /** + * The cached model consumer. Various consumers dictate the model's usage and context + */ public enum Consumer { - PIPELINE, - SEARCH, - INTERNAL + PIPELINE() { + @Override + public boolean inferenceConfigSupported(InferenceConfig config) { + return config == null || config.supportsIngestPipeline(); + } + + @Override + public String exceptionName() { + return "ingest"; + } + }, + SEARCH_AGGS() { + @Override + public boolean inferenceConfigSupported(InferenceConfig config) { + return config == null || config.supportsPipelineAggregation(); + } + + @Override + public String exceptionName() { + return "search(aggregation)"; + } + }, + SEARCH_RESCORER() { + @Override + public boolean inferenceConfigSupported(InferenceConfig config) { + // Null configs imply creation via target type. This is for BWC for very old models + // Consequently, if the config is null, we don't support LTR with them. + return config != null && config.supportsSearchRescorer(); + } + + @Override + public String exceptionName() { + return "search(rescorer)"; + } + }, + INTERNAL() { + @Override + public boolean inferenceConfigSupported(InferenceConfig config) { + return true; + } + + @Override + public String exceptionName() { + return "internal"; + } + }; + + /** + * @param config The inference config for the model. It may be null for very old Regression or classification models + * @return Is this configuration type supported within this cache context? 
+ */ + public abstract boolean inferenceConfigSupported(@Nullable InferenceConfig config); + + /** + * @return The cache context name to use if an exception must be thrown due to the config not being supported + */ + public abstract String exceptionName(); + + public boolean isAnyOf(Consumer... consumers) { + return Arrays.stream(consumers).anyMatch(c -> this == c); + } } private static class ModelAndConsumer { @@ -218,13 +283,23 @@ public void getModelForInternalInference(String modelId, ActionListener modelActionListener) { - getModel(modelId, Consumer.SEARCH, null, modelActionListener); + public void getModelForAggregation(String modelId, ActionListener modelActionListener) { + getModel(modelId, Consumer.SEARCH_AGGS, null, modelActionListener); + } + + /** + * Load the model for use at search for rescoring. Models requested by search are always cached. + * + * @param modelId the model to get + * @param modelActionListener the listener to alert when the model has been retrieved + */ + public void getModelForLearnToRank(String modelId, ActionListener modelActionListener) { + getModel(modelId, Consumer.SEARCH_RESCORER, null, modelActionListener); } /** @@ -258,6 +333,18 @@ private void getModel(String modelIdOrAlias, Consumer consumer, TaskId parentTas final String modelId = modelAliasToId.getOrDefault(modelIdOrAlias, modelIdOrAlias); ModelAndConsumer cachedModel = localModelCache.get(modelId); if (cachedModel != null) { + // Even if the model is already cached, we don't want to use the model in an unsupported task + if (consumer.inferenceConfigSupported(cachedModel.model.getInferenceConfig()) == false) { + modelActionListener.onFailure( + modelUnsupportedInUsageContext( + modelId, + cachedModel.model.getTrainedModelType(), + cachedModel.model.getInferenceConfig(), + consumer + ) + ); + return; + } cachedModel.consumers.add(consumer); try { cachedModel.model.acquire(); @@ -295,46 +382,71 @@ private boolean loadModelIfNecessary( TaskId parentTaskId,
ActionListener modelActionListener ) { - synchronized (loadingListeners) { - final String modelId = modelAliasToId.getOrDefault(modelIdOrAlias, modelIdOrAlias); - ModelAndConsumer cachedModel = localModelCache.get(modelId); - if (cachedModel != null) { - cachedModel.consumers.add(consumer); - try { - cachedModel.model.acquire(); - } catch (CircuitBreakingException e) { - modelActionListener.onFailure(e); + final SetOnce exceptionToNotifyListener = new SetOnce<>(); + final SetOnce localModelToNotifyListener = new SetOnce<>(); + final SetOnce modelLoadingRunnable = new SetOnce<>(); + try { + synchronized (loadingListeners) { + final String modelId = modelAliasToId.getOrDefault(modelIdOrAlias, modelIdOrAlias); + ModelAndConsumer cachedModel = localModelCache.get(modelId); + if (cachedModel != null) { + cachedModel.consumers.add(consumer); + try { + cachedModel.model.acquire(); + } catch (CircuitBreakingException e) { + exceptionToNotifyListener.set(e); + return true; + } + localModelToNotifyListener.set(cachedModel.model); return true; } - modelActionListener.onResponse(cachedModel.model); - return true; - } - - // Add the listener to the queue if the model is loading - Queue> listeners = loadingListeners.computeIfPresent( - modelId, - (storedModelKey, listenerQueue) -> addFluently(listenerQueue, modelActionListener) - ); + // Add the listener to the queue if the model is loading + Queue> listeners = loadingListeners.computeIfPresent( + modelId, + (storedModelKey, listenerQueue) -> addFluently(listenerQueue, modelActionListener) + ); - // The cachedModel entry is null, but there are listeners present, that means it is being loaded - if (listeners != null) { - return true; - } + // The cachedModel entry is null, but there are listeners present, that means it is being loaded + // If it is already being loaded, we don't need to start another loading process and we know the listener will + // eventually be called + if (listeners != null) { + return true; + } - if 
(Consumer.SEARCH != consumer && referencedModels.contains(modelId) == false) { - // The model is requested by a pipeline but not referenced by any ingest pipelines. - // This means it is a simulate call and the model should not be cached - logger.trace( - () -> format("[%s] (model_alias [%s]) not actively loading, eager loading without cache", modelId, modelIdOrAlias) - ); - loadWithoutCaching(modelId, consumer, parentTaskId, modelActionListener); - } else { - logger.trace(() -> format("[%s] (model_alias [%s]) attempting to load and cache", modelId, modelIdOrAlias)); - loadingListeners.put(modelId, addFluently(new ArrayDeque<>(), modelActionListener)); - loadModel(modelId, consumer); + // The model is not currently being loaded (indicated by listeners check above). + // So start a new load outside of the synchronized block. + if (consumer.isAnyOf(Consumer.SEARCH_AGGS, Consumer.SEARCH_RESCORER) == false + && referencedModels.contains(modelId) == false) { + // The model is requested by a pipeline but not referenced by any ingest pipelines. + // This means it is a simulate call and the model should not be cached + logger.trace( + () -> format("[%s] (model_alias [%s]) not actively loading, eager loading without cache", modelId, modelIdOrAlias) + ); + modelLoadingRunnable.set(() -> loadWithoutCaching(modelId, consumer, parentTaskId, modelActionListener)); + } else { + logger.trace(() -> format("[%s] (model_alias [%s]) attempting to load and cache", modelId, modelIdOrAlias)); + loadingListeners.put(modelId, addFluently(new ArrayDeque<>(), modelActionListener)); + modelLoadingRunnable.set(() -> loadModel(modelId, consumer)); + } + return false; + } // synchronized (loadingListeners) + } finally { + // Notify the passed listener if the model was already in cache or an exception was thrown + // However, if we don't notify the listener here, + // it will be notified when the model is loaded. Either via the runnable below or some already existing loading thread. 
+ assert exceptionToNotifyListener.get() == null || localModelToNotifyListener.get() == null + : "both exception and local model set"; + if (exceptionToNotifyListener.get() != null) { + assert modelLoadingRunnable.get() == null : "Exception encountered, model loading runnable should be null"; + modelActionListener.onFailure(exceptionToNotifyListener.get()); + } else if (localModelToNotifyListener.get() != null) { + assert modelLoadingRunnable.get() == null : "Model was cached, model loading runnable should be null"; + modelActionListener.onResponse(localModelToNotifyListener.get()); + } else if (modelLoadingRunnable.get() != null) { + // We needed to start the model loading, with or without caching. We execute this outside of the synchronized block + modelLoadingRunnable.get().run(); } - return false; - } // synchronized (loadingListeners) + } } private void loadModel(String modelId, Consumer consumer) { @@ -342,19 +454,19 @@ private void loadModel(String modelId, Consumer consumer) { // We don't want to cancel the loading if only ONE of them stops listening or closes connection // TODO Is there a way to only signal a cancel if all the listener tasks cancel???
provider.getTrainedModel(modelId, GetTrainedModelsAction.Includes.empty(), null, ActionListener.wrap(trainedModelConfig -> { - if (trainedModelConfig.isAllocateOnly()) { - if (consumer == Consumer.SEARCH) { - handleLoadFailure( + if (consumer.inferenceConfigSupported(trainedModelConfig.getInferenceConfig()) == false) { + handleLoadFailure( + modelId, + modelUnsupportedInUsageContext( modelId, - new ElasticsearchStatusException( - "Trained model [{}] with type [{}] is currently not usable in search.", - RestStatus.BAD_REQUEST, - modelId, - trainedModelConfig.getModelType() - ) - ); - return; - } + trainedModelConfig.getModelType(), + trainedModelConfig.getInferenceConfig(), + consumer + ) + ); + return; + } + if (trainedModelConfig.isAllocateOnly()) { handleLoadFailure(modelId, modelMustBeDeployedError(modelId)); return; } @@ -393,19 +505,21 @@ private void loadWithoutCaching( // If we the model is not loaded and we did not kick off a new loading attempt, this means that we may be getting called // by a simulated pipeline provider.getTrainedModel(modelId, GetTrainedModelsAction.Includes.empty(), parentTaskId, ActionListener.wrap(trainedModelConfig -> { + // If the model is used in an unsupported context, fail here + if (consumer.inferenceConfigSupported(trainedModelConfig.getInferenceConfig()) == false) { + handleLoadFailure( + modelId, + modelUnsupportedInUsageContext( + modelId, + trainedModelConfig.getModelType(), + trainedModelConfig.getInferenceConfig(), + consumer + ) + ); + return; + } // If the model should be allocated, we should fail here if (trainedModelConfig.isAllocateOnly()) { - if (consumer == Consumer.SEARCH) { - modelActionListener.onFailure( - new ElasticsearchStatusException( - "model [{}] with type [{}] is currently not usable in search.", - RestStatus.BAD_REQUEST, - modelId, - trainedModelConfig.getModelType() - ) - ); - return; - } modelActionListener.onFailure(modelMustBeDeployedError(modelId)); return; } @@ -431,6 +545,7 @@ private void 
loadWithoutCaching( trainedModelConfig.getDefaultFieldMap(), inferenceConfig, trainedModelConfig.getLicenseLevel(), + trainedModelConfig.getModelType(), modelStatsService, trainedModelCircuitBreaker ) @@ -474,7 +589,7 @@ private void updateCircuitBreakerEstimate( } } - private ElasticsearchStatusException modelMustBeDeployedError(String modelId) { + private static ElasticsearchStatusException modelMustBeDeployedError(String modelId) { return new ElasticsearchStatusException( "Model [{}] must be deployed to use. Please deploy with the start trained model deployment API.", RestStatus.BAD_REQUEST, @@ -482,6 +597,22 @@ private ElasticsearchStatusException modelMustBeDeployedError(String modelId) { ); } + private static ElasticsearchStatusException modelUnsupportedInUsageContext( + String modelId, + TrainedModelType modelType, + InferenceConfig inferenceConfig, + Consumer consumer + ) { + return new ElasticsearchStatusException( + "Trained model [{}] with type [{}] and task [{}] is currently not usable in [{}].", + RestStatus.BAD_REQUEST, + modelId, + modelType, + Optional.ofNullable(inferenceConfig).map(InferenceConfig::getName).orElse("_unknown_"), + consumer.exceptionName() + ); + } + private void handleLoadSuccess( String modelId, Consumer consumer, @@ -500,6 +631,7 @@ private void handleLoadSuccess( trainedModelConfig.getDefaultFieldMap(), inferenceConfig, trainedModelConfig.getLicenseLevel(), + Optional.ofNullable(trainedModelConfig.getModelType()).orElse(TrainedModelType.TREE_ENSEMBLE), modelStatsService, trainedModelCircuitBreaker ); @@ -510,7 +642,7 @@ private void handleLoadSuccess( // Also, if the consumer is a search consumer, we should always cache it if (referencedModels.contains(modelId) || Sets.haveNonEmptyIntersection(modelIdToModelAliases.getOrDefault(modelId, new HashSet<>()), referencedModels) - || consumer.equals(Consumer.SEARCH)) { + || consumer.equals(Consumer.SEARCH_AGGS)) { try { // The local model may already be in cache. 
If it is, we don't bother adding it to cache. // If it isn't, we flip an `isLoaded` flag, and increment the model counter to make sure if it is evicted @@ -673,7 +805,7 @@ public void clusterChanged(ClusterChangedEvent event) { ); if (oldModelAliasesNotReferenced && newModelAliasesNotReferenced && modelIsNotReferenced) { ModelAndConsumer modelAndConsumer = localModelCache.get(modelId); - if (modelAndConsumer != null && modelAndConsumer.consumers.contains(Consumer.SEARCH) == false) { + if (modelAndConsumer != null && modelAndConsumer.consumers.contains(Consumer.SEARCH_AGGS) == false) { logger.trace("[{} ({})] invalidated from cache", modelId, modelAliasOrId); localModelCache.invalidate(modelId); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FeatureExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FeatureExtractor.java new file mode 100644 index 000000000000..36bf36ef99c5 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FeatureExtractor.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.rescorer; + +import org.apache.lucene.index.LeafReaderContext; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public interface FeatureExtractor { + void setNextReader(LeafReaderContext segmentContext) throws IOException; + + void addFeatures(Map featureMap, int docId) throws IOException; + + List featureNames(); +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FieldValueFeatureExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FieldValueFeatureExtractor.java new file mode 100644 index 000000000000..9f0ef84fc357 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/FieldValueFeatureExtractor.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.rescorer; + +import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.ValueFetcher; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.lookup.Source; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class FieldValueFeatureExtractor implements FeatureExtractor { + + record FieldValueFetcher(String fieldName, ValueFetcher valueFetcher) {} + + private LeafReaderContext segmentContext; + private final List documentFieldNames; + private final List valueFetcherList; + private final SearchLookup sourceLookup; + + FieldValueFeatureExtractor(List documentFieldNames, SearchExecutionContext executionContext) { + this.documentFieldNames = documentFieldNames; + this.valueFetcherList = documentFieldNames.stream().map(s -> { + MappedFieldType mappedFieldType = executionContext.getFieldType(s); + if (mappedFieldType != null) { + return new FieldValueFetcher(s, mappedFieldType.valueFetcher(executionContext, null)); + } + return null; + }).filter(Objects::nonNull).toList(); + this.sourceLookup = executionContext.lookup(); + } + + @Override + public void setNextReader(LeafReaderContext segmentContext) { + this.segmentContext = segmentContext; + for (FieldValueFetcher vf : valueFetcherList) { + vf.valueFetcher().setNextReader(segmentContext); + } + } + + @Override + public void addFeatures(Map featureMap, int docId) throws IOException { + Source source = sourceLookup.getSource(this.segmentContext, docId); + for (FieldValueFetcher vf : this.valueFetcherList) { + featureMap.put(vf.fieldName(), vf.valueFetcher().fetchValues(source, docId, new ArrayList<>()).get(0)); + } + } + + @Override + public List featureNames() { + return documentFieldNames; + } +} diff 
--git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorer.java new file mode 100644 index 000000000000..e8905975b052 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorer.java @@ -0,0 +1,136 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.rescorer; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.search.rescore.RescoreContext; +import org.elasticsearch.search.rescore.Rescorer; +import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearnToRankConfigUpdate; +import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.util.stream.Collectors.toUnmodifiableSet; + +public class InferenceRescorer implements Rescorer { + + public static final InferenceRescorer INSTANCE = new InferenceRescorer(); + private static final Logger logger = LogManager.getLogger(InferenceRescorer.class); + + private static final Comparator SCORE_DOC_COMPARATOR = (o1, o2) -> { + int cmp = Float.compare(o2.score, o1.score); + return cmp == 0 ? 
Integer.compare(o1.doc, o2.doc) : cmp; + }; + + private InferenceRescorer() { + + } + + @Override + public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext rescoreContext) throws IOException { + if (topDocs.scoreDocs.length == 0) { + return topDocs; + } + InferenceRescorerContext ltrRescoreContext = (InferenceRescorerContext) rescoreContext; + if (ltrRescoreContext.inferenceDefinition == null) { + throw new IllegalStateException("local model reference is null, missing rewriteAndFetch before rescore phase?"); + } + LocalModel definition = ltrRescoreContext.inferenceDefinition; + + // First take top slice of incoming docs, to be rescored: + TopDocs topNFirstPass = topN(topDocs, rescoreContext.getWindowSize()); + // Save doc IDs for which rescoring was applied to be used in score explanation + Set topNDocIDs = Arrays.stream(topNFirstPass.scoreDocs).map(scoreDoc -> scoreDoc.doc).collect(toUnmodifiableSet()); + rescoreContext.setRescoredDocs(topNDocIDs); + ScoreDoc[] hitsToRescore = topNFirstPass.scoreDocs; + Arrays.sort(hitsToRescore, Comparator.comparingInt(a -> a.doc)); + int hitUpto = 0; + int readerUpto = -1; + int endDoc = 0; + int docBase = 0; + List leaves = ltrRescoreContext.executionContext.searcher().getIndexReader().leaves(); + LeafReaderContext currentSegment = null; + boolean changedSegment = true; + List featureExtractors = ltrRescoreContext.buildFeatureExtractors(); + List> docFeatures = new ArrayList<>(topNDocIDs.size()); + int featureSize = featureExtractors.stream().mapToInt(fe -> fe.featureNames().size()).sum(); + while (hitUpto < hitsToRescore.length) { + final ScoreDoc hit = hitsToRescore[hitUpto]; + final int docID = hit.doc; + while (docID >= endDoc) { + readerUpto++; + currentSegment = leaves.get(readerUpto); + endDoc = currentSegment.docBase + currentSegment.reader().maxDoc(); + changedSegment = true; + } + assert currentSegment != null : "Unexpected null segment"; + if (changedSegment) { + // We advanced to another 
segment and update our document value fetchers + docBase = currentSegment.docBase; + for (FeatureExtractor featureExtractor : featureExtractors) { + featureExtractor.setNextReader(currentSegment); + } + changedSegment = false; + } + int targetDoc = docID - docBase; + Map features = Maps.newMapWithExpectedSize(featureSize); + for (FeatureExtractor featureExtractor : featureExtractors) { + featureExtractor.addFeatures(features, targetDoc); + } + docFeatures.add(features); + hitUpto++; + } + for (int i = 0; i < hitsToRescore.length; i++) { + Map features = docFeatures.get(i); + try { + hitsToRescore[i].score = ((Number) definition.infer(features, LearnToRankConfigUpdate.EMPTY_PARAMS).predictedValue()) + .floatValue(); + } catch (Exception ex) { + logger.warn("Failure rescoring doc...", ex); + } + } + assert rescoreContext.getWindowSize() >= hitsToRescore.length + : "unexpected, windows size [" + rescoreContext.getWindowSize() + "] should be gte [" + hitsToRescore.length + "]"; + + Arrays.sort(topDocs.scoreDocs, SCORE_DOC_COMPARATOR); + return topDocs; + } + + @Override + public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreContext rescoreContext, Explanation sourceExplanation) + throws IOException { + // TODO: Call infer again but with individual feature importance values and explaining the model (which features are used, etc.) + return null; + } + + /** Returns a new {@link TopDocs} with the topN from the incoming one, or the same TopDocs if the number of hits is already <= + * topN. 
*/ + private static TopDocs topN(TopDocs in, int topN) { + if (in.scoreDocs.length < topN) { + return in; + } + + ScoreDoc[] subset = new ScoreDoc[topN]; + System.arraycopy(in.scoreDocs, 0, subset, 0, topN); + + return new TopDocs(in.totalHits, subset); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilder.java new file mode 100644 index 000000000000..0885e0e5ffce --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilder.java @@ -0,0 +1,188 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.rescorer; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.rescore.RescorerBuilder; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; +import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; + +import java.io.IOException; +import java.util.Objects; +import java.util.function.Supplier; + +public class InferenceRescorerBuilder extends RescorerBuilder { + + public static final String NAME = "inference"; + 
private static final ParseField MODEL = new ParseField("model_id"); + private static final ObjectParser PARSER = new ObjectParser<>(NAME, false, Builder::new); + static { + PARSER.declareString(Builder::setModelId, MODEL); + } + + public static InferenceRescorerBuilder fromXContent(XContentParser parser, Supplier modelLoadingServiceSupplier) { + return PARSER.apply(parser, null).build(modelLoadingServiceSupplier); + } + + private final String modelId; + private final LocalModel inferenceDefinition; + private final Supplier inferenceDefinitionSupplier; + private final Supplier modelLoadingServiceSupplier; + private boolean rescoreOccurred; + + public InferenceRescorerBuilder(String modelId, Supplier modelLoadingServiceSupplier) { + this.modelId = Objects.requireNonNull(modelId); + this.modelLoadingServiceSupplier = modelLoadingServiceSupplier; + this.inferenceDefinition = null; + this.inferenceDefinitionSupplier = null; + } + + InferenceRescorerBuilder(String modelId, LocalModel inferenceDefinition) { + this.modelId = Objects.requireNonNull(modelId); + this.inferenceDefinition = Objects.requireNonNull(inferenceDefinition); + this.inferenceDefinitionSupplier = null; + this.modelLoadingServiceSupplier = null; + } + + private InferenceRescorerBuilder( + String modelId, + Supplier modelLoadingServiceSupplier, + Supplier inferenceDefinitionSupplier + ) { + this.modelId = modelId; + this.inferenceDefinition = null; + this.inferenceDefinitionSupplier = inferenceDefinitionSupplier; + this.modelLoadingServiceSupplier = modelLoadingServiceSupplier; + } + + public InferenceRescorerBuilder(StreamInput input, Supplier modelLoadingServiceSupplier) throws IOException { + super(input); + this.modelId = input.readString(); + this.inferenceDefinitionSupplier = null; + this.inferenceDefinition = null; + this.modelLoadingServiceSupplier = modelLoadingServiceSupplier; + } + + @Override + public String getWriteableName() { + return NAME; + } + + /** + * should be updated once {@link 
InferenceRescorerFeature} is removed + */ + @Override + public TransportVersion getMinimalSupportedVersion() { + // TODO: update transport version when released! + return TransportVersion.current(); + } + + @Override + public RescorerBuilder rewrite(QueryRewriteContext ctx) throws IOException { + if (inferenceDefinition != null) { + return this; + } + if (inferenceDefinitionSupplier != null) { + if (inferenceDefinitionSupplier.get() == null) { + return this; + } + LocalModel inferenceDefinition = inferenceDefinitionSupplier.get(); + InferenceRescorerBuilder builder = new InferenceRescorerBuilder(modelId, inferenceDefinition); + if (windowSize() != null) { + builder.windowSize(windowSize()); + } + return builder; + } + // We don't want to rewrite on the coordinator as that doesn't make sense for this rescorer + if (ctx.convertToDataRewriteContext() != null) { + if (modelLoadingServiceSupplier == null || modelLoadingServiceSupplier.get() == null) { + throw new IllegalStateException("Model loading service must be available"); + } + SetOnce inferenceDefinitionSetOnce = new SetOnce<>(); + ctx.registerAsyncAction((c, l) -> modelLoadingServiceSupplier.get().getModelForLearnToRank(modelId, ActionListener.wrap(lm -> { + inferenceDefinitionSetOnce.set(lm); + l.onResponse(null); + }, l::onFailure))); + InferenceRescorerBuilder builder = new InferenceRescorerBuilder( + modelId, + modelLoadingServiceSupplier, + inferenceDefinitionSetOnce::get + ); + if (windowSize() != null) { + builder.windowSize(windowSize()); + } + return builder; + } + return this; + } + + public String getModelId() { + return modelId; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + if (inferenceDefinitionSupplier != null) { + throw new IllegalStateException("supplier must be null, missing a rewriteAndFetch?"); + } + assert inferenceDefinition == null || rescoreOccurred : "Unnecessarily populated local model object"; + out.writeString(modelId); + } + + @Override + 
protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field(MODEL.getPreferredName(), modelId); + builder.endObject(); + } + + @Override + protected InferenceRescorerContext innerBuildContext(int windowSize, SearchExecutionContext context) { + rescoreOccurred = true; + return new InferenceRescorerContext(windowSize, InferenceRescorer.INSTANCE, inferenceDefinition, context); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; + InferenceRescorerBuilder that = (InferenceRescorerBuilder) o; + return Objects.equals(modelId, that.modelId) + && Objects.equals(inferenceDefinition, that.inferenceDefinition) + && Objects.equals(inferenceDefinitionSupplier, that.inferenceDefinitionSupplier) + && Objects.equals(modelLoadingServiceSupplier, that.modelLoadingServiceSupplier); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), modelId, inferenceDefinition, inferenceDefinitionSupplier, modelLoadingServiceSupplier); + } + + private static class Builder { + private String modelId; + + public void setModelId(String modelId) { + this.modelId = modelId; + } + + InferenceRescorerBuilder build(Supplier modelLoadingServiceSupplier) { + return new InferenceRescorerBuilder(modelId, modelLoadingServiceSupplier); + } + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerContext.java new file mode 100644 index 000000000000..4e2cbfb8d3ac --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerContext.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.rescorer; + +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.rescore.RescoreContext; +import org.elasticsearch.search.rescore.Rescorer; +import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; + +import java.util.ArrayList; +import java.util.List; + +public class InferenceRescorerContext extends RescoreContext { + + final SearchExecutionContext executionContext; + final LocalModel inferenceDefinition; + + /** + * @param windowSize how many documents to rescore + * @param rescorer The rescorer to apply + * @param inferenceDefinition The local model inference definition, may be null during certain search phases. + * @param executionContext The local shard search context + */ + public InferenceRescorerContext( + int windowSize, + Rescorer rescorer, + LocalModel inferenceDefinition, + SearchExecutionContext executionContext + ) { + super(windowSize, rescorer); + this.executionContext = executionContext; + this.inferenceDefinition = inferenceDefinition; + } + + List buildFeatureExtractors() { + assert this.inferenceDefinition != null; + List featureExtractors = new ArrayList<>(); + if (this.inferenceDefinition.inputFields().isEmpty() == false) { + featureExtractors.add( + new FieldValueFeatureExtractor(new ArrayList<>(this.inferenceDefinition.inputFields()), this.executionContext) + ); + } + return featureExtractors; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerFeature.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerFeature.java new file mode 100644 index 000000000000..2b88faa3e4c1 --- /dev/null +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerFeature.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.rescorer; + +import org.elasticsearch.common.util.FeatureFlag; + +/** + * Inference rescorer feature flag. When the feature is complete, this flag will be removed. + * + * Upon removal, ensure transport serialization is all corrected for future BWC. + * + * See {@link InferenceRescorerBuilder} + */ +public class InferenceRescorerFeature { + + private InferenceRescorerFeature() {} + + private static final FeatureFlag INFERENCE_RESCORE_FEATURE_FLAG = new FeatureFlag("inference_rescorer"); + + public static boolean isEnabled() { + return INFERENCE_RESCORE_FEATURE_FLAG.isEnabled(); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java index ecc0b0b1e85e..1fabd7d02cf5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -201,7 +202,7 @@ private static 
DiscoveryNode createNode(int i, boolean isMlNode, Version nodeVer isMlNode ? Set.of(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.ML_ROLE) : Set.of(DiscoveryNodeRole.MASTER_ROLE, DiscoveryNodeRole.DATA_ROLE), - nodeVersion + VersionInformation.inferVersions(nodeVersion) ); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java index 3b93d8b44aa2..282005e87e1d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.MapBuilder; @@ -1467,10 +1468,16 @@ private TrainedModelAssignmentClusterService createClusterService(int maxLazyNod } private static DiscoveryNode buildNode(String name, boolean isML, long nativeMemory, int allocatedProcessors) { - return buildNode(name, isML, nativeMemory, allocatedProcessors, Version.CURRENT); + return buildNode(name, isML, nativeMemory, allocatedProcessors, VersionInformation.CURRENT); } - private static DiscoveryNode buildNode(String name, boolean isML, long nativeMemory, int allocatedProcessors, Version version) { + private static DiscoveryNode buildNode( + String name, + boolean isML, + long nativeMemory, + int allocatedProcessors, + VersionInformation version 
+ ) { return new DiscoveryNode( name, name, @@ -1490,7 +1497,7 @@ private static RoutingInfoUpdate started() { } private static DiscoveryNode buildOldNode(String name, boolean isML, long nativeMemory, int allocatedProcessors) { - return buildNode(name, isML, nativeMemory, allocatedProcessors, Version.V_7_15_0); + return buildNode(name, isML, nativeMemory, allocatedProcessors, VersionInformation.inferVersions(Version.V_7_15_0)); } private static StartTrainedModelDeploymentAction.TaskParams newParams(String modelId, long modelSize) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java index 5cebcf076133..4709925dbe7d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/LocalModelTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.license.License; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.preprocessing.OneHotEncoding; import org.elasticsearch.xpack.core.ml.inference.results.ClassificationInferenceResults; import org.elasticsearch.xpack.core.ml.inference.results.InferenceResults; @@ -81,6 +82,7 @@ public void testClassificationInfer() throws Exception { Collections.singletonMap("field.foo", "field.foo.keyword"), ClassificationConfig.EMPTY_PARAMS, randomFrom(License.OperationMode.values()), + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -119,6 +121,7 @@ public void testClassificationInfer() throws Exception { Collections.singletonMap("field.foo", "field.foo.keyword"), ClassificationConfig.EMPTY_PARAMS, 
License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -171,6 +174,7 @@ public void testClassificationInferWithDifferentPredictionFieldTypes() throws Ex Collections.singletonMap("field.foo", "field.foo.keyword"), ClassificationConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -233,6 +237,7 @@ public void testRegression() throws Exception { Collections.singletonMap("bar", "bar.keyword"), RegressionConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -265,6 +270,7 @@ public void testAllFieldsMissing() throws Exception { null, RegressionConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -300,6 +306,7 @@ public void testInferPersistsStatsAfterNumberOfCalls() throws Exception { null, ClassificationConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, mock(CircuitBreaker.class) ); @@ -359,6 +366,7 @@ public void testReferenceCounting() throws IOException { null, ClassificationConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, breaker ); @@ -385,6 +393,7 @@ public void testReferenceCounting() throws IOException { null, ClassificationConfig.EMPTY_PARAMS, License.OperationMode.PLATINUM, + TrainedModelType.TREE_ENSEMBLE, modelStatsService, breaker ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java index 11e7fe04af2c..644b61953774 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java @@ -414,7 +414,7 @@ public void testGetModelForSearch() throws Exception { for (int i = 0; i < 3; i++) { PlainActionFuture future = new PlainActionFuture<>(); - modelLoadingService.getModelForSearch(modelId, future); + modelLoadingService.getModelForAggregation(modelId, future); assertThat(future.get(), is(not(nullValue()))); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderRewriteTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderRewriteTests.java new file mode 100644 index 000000000000..710c9a49bbfd --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderRewriteTests.java @@ -0,0 +1,138 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.rescorer; + +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.query.CoordinatorRewriteContext; +import org.elasticsearch.index.query.DataRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.AbstractBuilderTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ml.inference.TrainedModelStatsService; +import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; +import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; +import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; +import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; + +import java.io.IOException; +import java.util.List; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class InferenceRescorerBuilderRewriteTests extends AbstractBuilderTestCase { + + public void testMustRewrite() { + TestModelLoader testModelLoader = new TestModelLoader(); + InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder("modelId", () -> testModelLoader); + SearchExecutionContext context = createSearchExecutionContext(); + InferenceRescorerContext inferenceRescorerContext = inferenceRescorerBuilder.innerBuildContext(randomIntBetween(1, 
30), context); + IllegalStateException e = expectThrows( + IllegalStateException.class, + () -> inferenceRescorerContext.rescorer() + .rescore( + new TopDocs(new TotalHits(10, TotalHits.Relation.EQUAL_TO), new ScoreDoc[10]), + mock(IndexSearcher.class), + inferenceRescorerContext + ) + ); + assertEquals("local model reference is null, missing rewriteAndFetch before rescore phase?", e.getMessage()); + } + + public void testRewriteOnCoordinator() throws IOException { + TestModelLoader testModelLoader = new TestModelLoader(); + InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder("modelId", () -> testModelLoader); + CoordinatorRewriteContext context = createCoordinatorRewriteContext( + new DateFieldMapper.DateFieldType("@timestamp"), + randomIntBetween(0, 1_100_000), + randomIntBetween(1_500_000, Integer.MAX_VALUE) + ); + InferenceRescorerBuilder rewritten = (InferenceRescorerBuilder) inferenceRescorerBuilder.rewrite(context); + assertSame(inferenceRescorerBuilder, rewritten); + assertFalse(context.hasAsyncActions()); + } + + public void testRewriteOnShard() throws IOException { + TestModelLoader testModelLoader = new TestModelLoader(); + InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder("modelId", () -> testModelLoader); + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); + InferenceRescorerBuilder rewritten = (InferenceRescorerBuilder) inferenceRescorerBuilder.rewrite(createSearchExecutionContext()); + assertSame(inferenceRescorerBuilder, rewritten); + assertFalse(searchExecutionContext.hasAsyncActions()); + } + + public void testRewriteAndFetchOnDataNode() throws IOException { + TestModelLoader testModelLoader = new TestModelLoader(); + InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder("modelId", () -> testModelLoader); + boolean setWindowSize = randomBoolean(); + if (setWindowSize) { + inferenceRescorerBuilder.windowSize(42); + } + 
DataRewriteContext rewriteContext = dataRewriteContext(); + InferenceRescorerBuilder rewritten = (InferenceRescorerBuilder) inferenceRescorerBuilder.rewrite(rewriteContext); + assertNotSame(inferenceRescorerBuilder, rewritten); + assertTrue(rewriteContext.hasAsyncActions()); + if (setWindowSize) { + assertThat(rewritten.windowSize(), equalTo(42)); + } + } + + public void testBuildContext() { + LocalModel localModel = localModel(); + List inputFields = List.of(DOUBLE_FIELD_NAME, INT_FIELD_NAME); + when(localModel.inputFields()).thenReturn(inputFields); + SearchExecutionContext context = createSearchExecutionContext(); + InferenceRescorerBuilder inferenceRescorerBuilder = new InferenceRescorerBuilder("test_model", localModel); + InferenceRescorerContext rescoreContext = inferenceRescorerBuilder.innerBuildContext(20, context); + assertNotNull(rescoreContext); + assertThat(rescoreContext.getWindowSize(), equalTo(20)); + List featureExtractors = rescoreContext.buildFeatureExtractors(); + assertThat(featureExtractors, hasSize(1)); + assertThat( + featureExtractors.stream().flatMap(featureExtractor -> featureExtractor.featureNames().stream()).toList(), + containsInAnyOrder(DOUBLE_FIELD_NAME, INT_FIELD_NAME) + ); + } + + private static LocalModel localModel() { + return mock(LocalModel.class); + } + + private static class TestModelLoader extends ModelLoadingService { + TestModelLoader() { + super( + mock(TrainedModelProvider.class), + mock(InferenceAuditor.class), + mock(ThreadPool.class), + mock(ClusterService.class), + mock(TrainedModelStatsService.class), + Settings.EMPTY, + "test", + mock(CircuitBreaker.class), + new XPackLicenseState(System::currentTimeMillis) + ); + } + + @Override + public void getModelForLearnToRank(String modelId, ActionListener modelActionListener) { + modelActionListener.onResponse(localModel()); + } + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderSerializationTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderSerializationTests.java new file mode 100644 index 000000000000..2ca319d4316b --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/rescorer/InferenceRescorerBuilderSerializationTests.java @@ -0,0 +1,88 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.inference.rescorer; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; +import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; + +import java.io.IOException; +import java.util.function.Supplier; + +import static org.elasticsearch.search.rank.RankBuilder.WINDOW_SIZE_FIELD; + +public class InferenceRescorerBuilderSerializationTests extends AbstractBWCSerializationTestCase { + + @Override + protected InferenceRescorerBuilder doParseInstance(XContentParser parser) throws IOException { + String fieldName = null; + InferenceRescorerBuilder rescorer = null; + Integer windowSize = null; + XContentParser.Token token = parser.nextToken(); + assert token == XContentParser.Token.START_OBJECT; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + fieldName = parser.currentName(); + } else if (token.isValue()) { + if (WINDOW_SIZE_FIELD.match(fieldName, parser.getDeprecationHandler())) { + windowSize = parser.intValue(); + } else { + throw new ParsingException(parser.getTokenLocation(), "rescore doesn't support [" + fieldName + "]"); 
+ } + } else if (token == XContentParser.Token.START_OBJECT) { + rescorer = InferenceRescorerBuilder.fromXContent(parser, null); + } else { + throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "] after [" + fieldName + "]"); + } + } + if (rescorer == null) { + throw new ParsingException(parser.getTokenLocation(), "missing rescore type"); + } + if (windowSize != null) { + rescorer.windowSize(windowSize); + } + return rescorer; + } + + @Override + protected Writeable.Reader instanceReader() { + return in -> new InferenceRescorerBuilder(in, null); + } + + @Override + protected InferenceRescorerBuilder createTestInstance() { + InferenceRescorerBuilder builder = new InferenceRescorerBuilder(randomAlphaOfLength(10), (Supplier) null); + if (randomBoolean()) { + builder.windowSize(randomIntBetween(1, 10000)); + } + return builder; + } + + @Override + protected InferenceRescorerBuilder mutateInstance(InferenceRescorerBuilder instance) throws IOException { + int i = randomInt(1); + return switch (i) { + case 0 -> new InferenceRescorerBuilder( + randomValueOtherThan(instance.getModelId(), () -> randomAlphaOfLength(10)), + (Supplier) null + ); + case 1 -> new InferenceRescorerBuilder(instance.getModelId(), (Supplier) null).windowSize( + randomValueOtherThan(instance.windowSize(), () -> randomIntBetween(1, 10000)) + ); + default -> throw new AssertionError("Unexpected random test case"); + }; + } + + @Override + protected InferenceRescorerBuilder mutateInstanceForVersion(InferenceRescorerBuilder instance, TransportVersion version) { + return instance; + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java index f83e14d41332..3182d03e4507 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; @@ -873,7 +874,7 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, ROLES_WITH_ML, - Version.fromString("6.2.0") + VersionInformation.inferVersions(Version.fromString("6.2.0")) ) ) .add( @@ -883,7 +884,7 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() new TransportAddress(InetAddress.getLoopbackAddress(), 9301), nodeAttr, ROLES_WITH_ML, - Version.fromString("6.1.0") + VersionInformation.inferVersions(Version.fromString("6.1.0")) ) ) .build(); @@ -931,7 +932,7 @@ public void testSelectLeastLoadedMlNode_jobWithRules() { new TransportAddress(InetAddress.getLoopbackAddress(), 9300), nodeAttr, ROLES_WITH_ML, - Version.fromString("6.2.0") + VersionInformation.inferVersions(Version.fromString("6.2.0")) ) ) .add( @@ -941,7 +942,7 @@ public void testSelectLeastLoadedMlNode_jobWithRules() { new TransportAddress(InetAddress.getLoopbackAddress(), 9301), nodeAttr, ROLES_WITH_ML, - Version.fromString("6.4.0") + VersionInformation.inferVersions(Version.fromString("6.4.0")) ) ) .build(); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java index 85adf1098311..6be8f0dd6f7b 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java +++ 
b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.LicensedFeature; @@ -130,7 +131,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { final ClusterSettings clusterSettings = clusterService.getClusterSettings(); final CleanerService cleanerService = new CleanerService(settings, clusterSettings, threadPool, getLicenseState()); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java index 494dab578a3e..48a7a3babf7c 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.XPackLicenseState; @@ -140,19 +140,16 @@ public interface Listener { * {@code IndicesCleaner} runs and reschedules itself 
in order to automatically clean (delete) indices that are outside of the * {@link #getRetention() retention} period. */ - class IndicesCleaner extends AbstractLifecycleRunnable { + class IndicesCleaner extends AbstractRunnable { private volatile Scheduler.Cancellable cancellable; - /** - * Enable automatic logging and stopping of the runnable based on the {@link #lifecycle}. - */ - IndicesCleaner() { - super(lifecycle, logger); - } - @Override - protected void doRunInLifecycle() throws Exception { + protected void doRun() { + if (lifecycle.stoppedOrClosed()) { + return; + } + // fetch the retention, which is depends on a bunch of rules TimeValue retention = getRetention(); @@ -174,7 +171,11 @@ protected void doRunInLifecycle() throws Exception { * Reschedule the cleaner if the service is not stopped. */ @Override - protected void onAfterInLifecycle() { + public void onAfter() { + if (lifecycle.stoppedOrClosed()) { + return; + } + ZonedDateTime start = ZonedDateTime.now(Clock.systemUTC()); TimeValue delay = executionScheduler.nextExecutionDelay(start); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerServiceTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerServiceTests.java index cdbdd0752759..4b1bb2c6e62f 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerServiceTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerServiceTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.ESTestCase; @@ -26,6 +27,7 @@ import java.util.Collections; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; @@ -140,6 +142,36 @@ public void testExecution() throws InterruptedException { assertThat(latch.getCount(), equalTo(0L)); } + public void testLifecycle() { + final var deterministicTaskQueue = new DeterministicTaskQueue(); + final var threadPool = deterministicTaskQueue.getThreadPool(); + final var mockLicenseState = mock(XPackLicenseState.class); + + CleanerService service = new CleanerService( + Settings.EMPTY, + clusterSettings, + mockLicenseState, + threadPool, + new TestExecutionScheduler(1_000) + ); + + final var cleanupCount = new AtomicInteger(); + service.add(ignored -> cleanupCount.incrementAndGet()); + + service.start(); + while (cleanupCount.get() < 10) { + deterministicTaskQueue.advanceTime(); + deterministicTaskQueue.runAllRunnableTasks(); + } + + service.stop(); + if (randomBoolean()) { + service.close(); + } + deterministicTaskQueue.runAllTasks(); // ensures the scheduling stops + assertEquals(10, cleanupCount.get()); + } + class TestListener implements CleanerService.Listener { final CountDownLatch latch; diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index d56dca65ebcb..69649ee6c1f9 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -458,7 +458,7 @@ public void testToXContent() throws IOException { Version.CURRENT, Version.CURRENT, IndexVersion.MINIMUM_COMPATIBLE, - IndexVersion.CURRENT, + IndexVersion.current(), apmIndicesExist }; final 
String expectedJson = Strings.format(""" { @@ -760,8 +760,8 @@ public void testToXContent() throws IOException { "master" ], "version": "%s", - "minIndexVersion":"%s", - "maxIndexVersion":"%s" + "min_index_version":%s, + "max_index_version":%s } }, "transport_versions": [] diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryMonitoringDocTests.java index 1b9de32687ba..e36a50233fb8 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexRecoveryMonitoringDocTests.java @@ -103,7 +103,7 @@ public void testToXContent() throws IOException { new TransportAddress(TransportAddress.META_ADDRESS, 9301), singletonMap("attr", "value_1"), singleton(DiscoveryNodeRole.DATA_ROLE), - new VersionInformation(Version.CURRENT.minimumCompatibilityVersion(), IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.CURRENT) + new VersionInformation(Version.CURRENT.minimumCompatibilityVersion(), IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()) ); final ShardId shardId = new ShardId("_index_a", "_uuid_a", 0); diff --git a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java index 06883d88c50e..1efeda816f59 100644 --- a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java +++ b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java @@ -81,9 +81,7 @@ public void 
testFailRestoreOnTooOldVersion() { SnapshotRestoreException e = expectThrows(SnapshotRestoreException.class, () -> clusterAdmin().restoreSnapshot(req).actionGet()); assertThat( e.getMessage(), - containsString( - "the snapshot was created with Elasticsearch version [2.0.0] " + "which isn't supported by the archive functionality" - ) + containsString("the snapshot has indices of version [2000099] which isn't supported by the archive functionality") ); } diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java index 937554f6a06a..df22c6d8ea9a 100644 --- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java +++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java @@ -33,6 +33,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.ReadOnlyEngine; @@ -40,6 +41,7 @@ import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.translog.TranslogStats; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.LicensedFeature; @@ -83,7 +85,7 @@ public class OldLuceneVersions extends Plugin implements IndexStorePlugin, Clust License.OperationMode.ENTERPRISE ); - private static Version MINIMUM_ARCHIVE_VERSION = Version.fromString("5.0.0"); + private static final IndexVersion MINIMUM_ARCHIVE_VERSION = IndexVersion.fromId(5000099); private final SetOnce 
failShardsListener = new SetOnce<>(); @@ -101,7 +103,8 @@ public Collection createComponents( final IndexNameExpressionResolver resolver, final Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { this.failShardsListener.set(new FailShardsOnInvalidLicenseClusterListener(getLicenseState(), clusterService.getRerouteService())); if (DiscoveryNode.isMasterNode(environment.settings())) { @@ -153,7 +156,7 @@ public void afterFilesRestoredFromRepository(IndexShard indexShard) { } @Override - public BiConsumer addPreRestoreVersionCheck() { + public BiConsumer addPreRestoreVersionCheck() { return (snapshot, version) -> { if (version.isLegacyIndexVersion()) { if (ARCHIVE_FEATURE.checkWithoutTracking(getLicenseState()) == false) { @@ -162,9 +165,7 @@ public BiConsumer addPreRestoreVersionCheck() { if (version.before(MINIMUM_ARCHIVE_VERSION)) { throw new SnapshotRestoreException( snapshot, - "the snapshot was created with Elasticsearch version [" - + version - + "] which isn't supported by the archive functionality" + "the snapshot has indices of version [" + version + "] which isn't supported by the archive functionality" ); } } diff --git a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/ProfilingPlugin.java b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/ProfilingPlugin.java index 1cec0b98a66b..6e8e6d9664c4 100644 --- a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/ProfilingPlugin.java +++ b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/ProfilingPlugin.java @@ -26,6 +26,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import 
org.elasticsearch.repositories.RepositoriesService; @@ -81,7 +82,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { logger.info("Profiling is {}", enabled ? "enabled" : "disabled"); registry.set(new ProfilingIndexTemplateRegistry(settings, clusterService, threadPool, client, xContentRegistry)); diff --git a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/StackFrame.java b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/StackFrame.java index e2621d926272..7bad037e6c81 100644 --- a/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/StackFrame.java +++ b/x-pack/plugin/profiler/src/main/java/org/elasticsearch/xpack/profiler/StackFrame.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.profiler; -import org.elasticsearch.xcontent.ObjectPath; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -18,10 +17,6 @@ import java.util.Objects; final class StackFrame implements ToXContentObject { - private static final String[] PATH_FILE_NAME = new String[] { "Stackframe", "file", "name" }; - private static final String[] PATH_FUNCTION_NAME = new String[] { "Stackframe", "function", "name" }; - private static final String[] PATH_FUNCTION_OFFSET = new String[] { "Stackframe", "function", "offset" }; - private static final String[] PATH_LINE_NUMBER = new String[] { "Stackframe", "line", "number" }; List fileName; List functionName; List functionOffset; @@ -46,26 +41,12 @@ private static List listOf(Object o) { } public static StackFrame fromSource(Map source) { - // stack frames may either be stored with synthetic source or regular one - // which results either in a nested or flat document structure. 
- - if (source.containsKey("Stackframe")) { - // synthetic source - return new StackFrame( - ObjectPath.eval(PATH_FILE_NAME, source), - ObjectPath.eval(PATH_FUNCTION_NAME, source), - ObjectPath.eval(PATH_FUNCTION_OFFSET, source), - ObjectPath.eval(PATH_LINE_NUMBER, source) - ); - } else { - // regular source - return new StackFrame( - source.get("Stackframe.file.name"), - source.get("Stackframe.function.name"), - source.get("Stackframe.function.offset"), - source.get("Stackframe.line.number") - ); - } + return new StackFrame( + source.get("Stackframe.file.name"), + source.get("Stackframe.function.name"), + source.get("Stackframe.function.offset"), + source.get("Stackframe.line.number") + ); } @Override diff --git a/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/ProfilingIndexManagerTests.java b/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/ProfilingIndexManagerTests.java index 3117b7732a98..be5d46e17804 100644 --- a/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/ProfilingIndexManagerTests.java +++ b/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/ProfilingIndexManagerTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.profiler; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -27,6 +26,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -172,7 +172,7 @@ private ClusterState createClusterState(Settings nodeSettings, Iterable for (String index : existingIndices) { IndexMetadata mockMetadata = mock(IndexMetadata.class); 
when(mockMetadata.getIndex()).thenReturn(new Index(index, index)); - when(mockMetadata.getCompatibilityVersion()).thenReturn(Version.CURRENT); + when(mockMetadata.getCompatibilityVersion()).thenReturn(IndexVersion.current()); indices.put(index, mockMetadata); } return ClusterState.builder(new ClusterName("test")) diff --git a/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/StackFrameTests.java b/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/StackFrameTests.java index b5a0a60bfea8..81bc8dd8f362 100644 --- a/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/StackFrameTests.java +++ b/x-pack/plugin/profiler/src/test/java/org/elasticsearch/xpack/profiler/StackFrameTests.java @@ -23,25 +23,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public class StackFrameTests extends ESTestCase { - public void testCreateFromSyntheticSource() { - // tag::noformat - StackFrame frame = StackFrame.fromSource( - Map.of("Stackframe", Map.of( - "file", Map.of("name", "Main.java"), - "function", Map.of( - "name", "helloWorld", - "offset", 31733 - ), - "line", Map.of("number", 22)) - ) - ); - // end::noformat - assertEquals(List.of("Main.java"), frame.fileName); - assertEquals(List.of("helloWorld"), frame.functionName); - assertEquals(List.of(31733), frame.functionOffset); - assertEquals(List.of(22), frame.lineNumber); - } - public void testCreateFromRegularSource() { // tag::noformat StackFrame frame = StackFrame.fromSource( diff --git a/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/60_settings.yml b/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/60_settings.yml index 054126ce5171..a2c46b3da601 100644 --- a/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/60_settings.yml +++ 
b/x-pack/plugin/rollup/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/rollup/60_settings.yml @@ -94,8 +94,10 @@ --- "Downsample datastream with tier preference": - skip: - version: " - 8.4.99" - reason: "rollup renamed to downsample in 8.5.0" + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/97150" +# version: " - 8.4.99" +# reason: "rollup renamed to downsample in 8.5.0" - do: indices.put_index_template: diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java index e7705657fe56..23af864be9ec 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.downsample; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -14,7 +15,6 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.downsample.DownsampleAction; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import java.io.IOException; import java.util.List; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RollupShardIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RollupShardIndexer.java index 14b67e3cb2c2..0130c0aaf840 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RollupShardIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/RollupShardIndexer.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.bulk.BulkProcessor2; 
import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Rounding; @@ -45,7 +46,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import org.elasticsearch.xpack.core.downsample.DownsampleIndexerAction; import org.elasticsearch.xpack.core.rollup.action.RollupShardTask; diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index a239f4ed29c7..7f3fa21811ef 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -68,7 +69,6 @@ import org.elasticsearch.xpack.aggregatemetric.mapper.AggregateDoubleMetricFieldMapper; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.downsample.DownsampleAction; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import 
org.elasticsearch.xpack.core.downsample.DownsampleIndexerAction; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; @@ -533,28 +533,11 @@ private static void validateDownsamplingInterval(MapperService mapperService, Do if (meta.isEmpty() == false) { String interval = meta.get(config.getIntervalType()); if (interval != null) { - DateHistogramInterval sourceIndexInterval = new DateHistogramInterval(interval); - DateHistogramInterval targetIndexInterval = config.getInterval(); - long sourceMillis = sourceIndexInterval.estimateMillis(); - long targetMillis = targetIndexInterval.estimateMillis(); - if (sourceMillis >= targetMillis) { - // Downsampling interval must be greater than source interval - e.addValidationError( - "Source index is a downsampled index. Downsampling interval [" - + targetIndexInterval - + "] must be greater than the source index interval [" - + sourceIndexInterval - + "]" - ); - } else if (targetMillis % sourceMillis != 0) { - // Downsampling interval must be a multiple of the source interval - e.addValidationError( - "Source index is a downsampled index. Downsampling interval [" - + targetIndexInterval - + "] must be a multiple of the source index interval [" - + sourceIndexInterval - + "]" - ); + try { + DownsampleConfig sourceConfig = new DownsampleConfig(new DateHistogramInterval(interval)); + DownsampleConfig.validateSourceAndTargetIntervals(sourceConfig, config); + } catch (IllegalArgumentException exception) { + e.addValidationError("Source index is a downsampled index. " + exception.getMessage()); } } @@ -566,14 +549,13 @@ private static void validateDownsamplingInterval(MapperService mapperService, Do + config.getTimeZone() + "] cannot be different than the source index timezone [" + sourceTimezone - + "]" + + "]." 
); } if (e.validationErrors().isEmpty() == false) { throw e; } - } } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index 4b39c34bca69..d5c30dae0d9a 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -77,7 +78,6 @@ import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.core.downsample.DownsampleAction; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; import org.elasticsearch.xpack.core.ilm.RolloverAction; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java index 52b3037fd3e2..aed478caa87b 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleDataStreamTests.java @@ -20,6 +20,7 @@ import 
org.elasticsearch.action.datastreams.CreateDataStreamAction; import org.elasticsearch.action.datastreams.GetDataStreamAction; import org.elasticsearch.action.datastreams.ModifyDataStreamsAction; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -48,7 +49,6 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.downsample.DownsampleAction; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import org.elasticsearch.xpack.rollup.Rollup; import org.hamcrest.Matchers; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureTests.java index 8fc73e667684..7890463936a5 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/downsample/DownsampleTransportFailureTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; @@ -36,7 +37,6 @@ import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; import org.elasticsearch.xpack.core.downsample.DownsampleAction; -import org.elasticsearch.xpack.core.downsample.DownsampleConfig; import 
org.elasticsearch.xpack.core.downsample.DownsampleIndexerAction; import org.elasticsearch.xpack.rollup.Rollup; import org.junit.Before; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index f3b44626580c..6234e789a8dd 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -721,7 +721,7 @@ private Map createFieldTypes(RollupJobConfig job) { ScriptCompiler.NONE, false, false, - IndexVersion.CURRENT, + IndexVersion.current(), null ).build(MapperBuilderContext.root(false)).fieldType(); fieldTypes.put(ft.name(), ft); @@ -730,7 +730,7 @@ private Map createFieldTypes(RollupJobConfig job) { if (job.getGroupConfig().getTerms() != null) { for (String field : job.getGroupConfig().getTerms().getFields()) { - MappedFieldType ft = new KeywordFieldMapper.Builder(field, IndexVersion.CURRENT).build(MapperBuilderContext.root(false)) + MappedFieldType ft = new KeywordFieldMapper.Builder(field, IndexVersion.current()).build(MapperBuilderContext.root(false)) .fieldType(); fieldTypes.put(ft.name(), ft); } @@ -744,7 +744,7 @@ private Map createFieldTypes(RollupJobConfig job) { ScriptCompiler.NONE, false, false, - IndexVersion.CURRENT, + IndexVersion.current(), null ).build(MapperBuilderContext.root(false)).fieldType(); fieldTypes.put(ft.name(), ft); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index b6e712a44706..9866f5e3e2a8 100644 --- 
a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogStats; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; @@ -330,7 +331,8 @@ public Collection createComponents( final IndexNameExpressionResolver resolver, final Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { final List components = new ArrayList<>(); this.repositoriesServiceSupplier = repositoriesServiceSupplier; diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java index dcf48861e021..8534ed268c03 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocator.java @@ -8,7 +8,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.client.internal.Client; @@ -41,6 +40,7 @@ import org.elasticsearch.core.Nullable; import 
org.elasticsearch.gateway.AsyncShardFetch; import org.elasticsearch.gateway.ReplicaShardAllocator; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.Snapshot; @@ -171,9 +171,9 @@ public void allocateUnassigned( final Snapshot snapshot = new Snapshot(repositoryName, snapshotId); - final Version version = shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT + final IndexVersion version = shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT ? ((RecoverySource.SnapshotRecoverySource) shardRouting.recoverySource()).version() - : Version.CURRENT; + : IndexVersion.current(); final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource( recoveryUuid, diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgrader.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgrader.java index 20cf4029bf44..3d9aefba6591 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgrader.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/upgrade/SearchableSnapshotIndexMetadataUpgrader.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.ShardLimitValidator; import org.elasticsearch.threadpool.ThreadPool; @@ -98,7 +99,8 @@ static boolean needsUpgrade(ClusterState state) { return state.metadata() .stream() .filter( - imd -> 
imd.getCompatibilityVersion().onOrAfter(Version.V_7_12_0) && imd.getCompatibilityVersion().before(Version.V_8_0_0) + imd -> imd.getCompatibilityVersion().onOrAfter(IndexVersion.V_7_12_0) + && imd.getCompatibilityVersion().before(IndexVersion.V_8_0_0) ) .filter(IndexMetadata::isPartialSearchableSnapshot) .map(IndexMetadata::getSettings) @@ -113,7 +115,8 @@ static ClusterState upgradeIndices(ClusterState currentState) { currentState.metadata() .stream() .filter( - imd -> imd.getCompatibilityVersion().onOrAfter(Version.V_7_12_0) && imd.getCompatibilityVersion().before(Version.V_8_0_0) + imd -> imd.getCompatibilityVersion().onOrAfter(IndexVersion.V_7_12_0) + && imd.getCompatibilityVersion().before(IndexVersion.V_8_0_0) ) .filter(imd -> imd.isPartialSearchableSnapshot() && notFrozenShardLimitGroup(imd.getSettings())) .map(SearchableSnapshotIndexMetadataUpgrader::setShardLimitGroupFrozen) diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java index 9534095a82aa..1c4ef99a32a8 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsTestCase.java @@ -14,7 +14,6 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; -import org.elasticsearch.Version; import org.elasticsearch.blobcache.common.ByteRange; import org.elasticsearch.blobcache.shared.SharedBlobCacheService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -37,6 +36,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import 
org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.recovery.RecoveryState; @@ -241,7 +241,7 @@ protected static SearchableSnapshotRecoveryState createRecoveryState(boolean fin new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("repo", new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId("some_index", UUIDs.randomBase64UUID(random())) ) ); diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocatorTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocatorTests.java index dacd973ab1e0..14bcc35686db 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocatorTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotAllocatorTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.snapshots.SearchableSnapshotsSettings; @@ -265,7 +266,7 @@ private static RecoverySource.SnapshotRecoverySource randomSnapshotSource(ShardI return new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(random()), new Snapshot("test-repo", new SnapshotId("test-snap", UUIDs.randomBase64UUID(random()))), - Version.CURRENT, + IndexVersion.current(), new IndexId(shardId.getIndexName(), 
UUIDs.randomBase64UUID(random())) ); } diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java index 98e427c16c46..25049cf5791b 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryStatsTests.java @@ -9,7 +9,6 @@ import org.apache.lucene.store.BufferedIndexInput; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; -import org.elasticsearch.Version; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.blobcache.BlobCacheTestUtils; import org.elasticsearch.blobcache.shared.SharedBlobCacheService; @@ -642,7 +641,7 @@ private void executeTestCase( fileName, fileContent.length, fileChecksum, - IndexVersion.CURRENT.luceneVersion().toString() + IndexVersion.current().luceneVersion().toString() ); final List files = List.of(new FileInfo(blobName, metadata, ByteSizeValue.ofBytes(fileContent.length))); final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), files, 0L, 0L, 0, 0L); @@ -689,7 +688,7 @@ protected IndexInputStats createIndexInputStats(long numFiles, long totalSize, l new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), new Snapshot("repo", new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())), - Version.CURRENT, + IndexVersion.current(), new IndexId("some_index", UUIDs.randomBase64UUID(random())) ) ); diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java 
b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java index dc8c29be139b..8753ba4ae392 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java @@ -735,7 +735,7 @@ public void testClearCache() throws Exception { randomFiles.add( new BlobStoreIndexShardSnapshot.FileInfo( blobName, - new StoreFileMetadata(fileName, input.length, checksum, IndexVersion.CURRENT.luceneVersion().toString()), + new StoreFileMetadata(fileName, input.length, checksum, IndexVersion.current().luceneVersion().toString()), ByteSizeValue.ofBytes(input.length) ) ); diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java index 05e2df692c56..ec4eb8faef3d 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/CachedBlobContainerIndexInputTests.java @@ -75,7 +75,7 @@ public void testRandomReads() throws Exception { fileName, input.length, checksum, - IndexVersion.CURRENT.luceneVersion().toString() + IndexVersion.current().luceneVersion().toString() ); final int partSize = randomBoolean() ? 
input.length : randomIntBetween(1, input.length); @@ -194,7 +194,7 @@ public void testThrowsEOFException() throws Exception { fileName, input.length, checksum, - IndexVersion.CURRENT.luceneVersion().toString() + IndexVersion.current().luceneVersion().toString() ); final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot( diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java index 8a869cfb2c0e..f2bab4ea974b 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java @@ -55,7 +55,7 @@ public void testRandomReads() throws IOException { final FileInfo fileInfo = new FileInfo( randomAlphaOfLength(10), - new StoreFileMetadata(fileName, fileData.length, checksum, IndexVersion.CURRENT.luceneVersion().toString()), + new StoreFileMetadata(fileName, fileData.length, checksum, IndexVersion.current().luceneVersion().toString()), ByteSizeValue.ofBytes(fileData.length) ); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java index 717cb438a22f..259928043e16 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java +++ 
b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.remote.RemoteClusterNodesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; @@ -17,6 +16,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; @@ -437,7 +437,7 @@ private static MockTransportService startTransport( final MockTransportService service = MockTransportService.createNewService( builder.build(), - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java index d2c47e02787b..172efca81be5 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java @@ -12,7 +12,6 @@ import org.apache.http.nio.entity.NStringEntity; import org.apache.lucene.search.TotalHits; import org.elasticsearch.TransportVersion; -import 
org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.remote.RemoteClusterNodesAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; @@ -28,6 +27,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -1106,7 +1106,7 @@ private static MockTransportService startTransport( .build(); final MockTransportService service = MockTransportService.createNewService( settings, - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), threadPool, null diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java index 28f9229f5abe..6aff36d3c809 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateActionTests.java @@ -32,6 +32,8 @@ public class RestAuthenticateActionTests extends SecurityIntegTestCase { private static boolean anonymousEnabled; + private static boolean operatorUser; + private static boolean operatorPrivilegesEnabled; private static String domainName; @BeforeClass @@ -44,11 +46,26 @@ public static void maybeSetDomain() { domainName = randomFrom(randomAlphaOfLengthBetween(3, 5), null); } + @BeforeClass + public static void maybeSetOperator() { + operatorUser = randomBoolean(); + operatorPrivilegesEnabled = randomBoolean(); + } + 
@Override protected boolean addMockHttpTransport() { return false; // enable http } + @Override + protected String configOperatorUsers() { + return super.configOperatorUsers() + + "operator:\n" + + " - usernames: ['" + + (operatorUser ? SecuritySettingsSource.TEST_USER_NAME : "_another_user") + + "']\n"; + } + @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); @@ -61,6 +78,7 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { if (domainName != null) { builder.put(DOMAIN_TO_REALM_ASSOC_SETTING.getConcreteSettingForNamespace(domainName).getKey(), "file"); } + builder.put("xpack.security.operator_privileges.enabled", operatorPrivilegesEnabled); return builder.build(); } @@ -101,6 +119,11 @@ public void testAuthenticateApi() throws Exception { assertThat(roles.size(), is(1)); assertThat(roles, contains(SecuritySettingsSource.TEST_ROLE)); } + if (operatorUser && operatorPrivilegesEnabled) { + assertThat(objectPath.evaluate("operator"), equalTo(true)); + } else { + assertThat(objectPath.evaluate("operator"), equalTo(null)); + } } public void testAuthenticateApiWithoutAuthentication() throws Exception { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index b571687646b6..a280b6005cd8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -61,6 +61,7 @@ import org.elasticsearch.http.netty4.internal.HttpHeadersAuthenticatorUtils; import org.elasticsearch.http.netty4.internal.HttpValidator; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndexDescriptor; import 
org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.ingest.Processor; @@ -623,7 +624,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { try { return createComponents( diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java index 0ca85ada4b5e..d6063254b327 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.InternalUser; import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.operator.OperatorPrivileges; public class TransportAuthenticateAction extends HandledTransportAction { @@ -56,7 +57,12 @@ protected void doExecute(Task task, AuthenticateRequest request, ActionListener< } else if (runAsUser instanceof InternalUser) { listener.onFailure(new IllegalArgumentException("user [" + runAsUser.principal() + "] is internal")); } else { - listener.onResponse(new AuthenticateResponse(authentication.maybeAddAnonymousRoles(anonymousUser))); + listener.onResponse( + new AuthenticateResponse( + authentication.maybeAddAnonymousRoles(anonymousUser), + OperatorPrivileges.isOperator(securityContext.getThreadContext()) + ) + ); } } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java index 47091c75a0e7..ea0870c1f987 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -21,7 +21,7 @@ import static org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction.TASKS_ORIGIN; import static org.elasticsearch.action.support.replication.PostWriteRefresh.POST_WRITE_REFRESH_ORIGIN; -import static org.elasticsearch.cluster.metadata.DataLifecycle.DLM_ORIGIN; +import static org.elasticsearch.cluster.metadata.DataLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN; import static org.elasticsearch.ingest.IngestService.INGEST_ORIGIN; import static org.elasticsearch.persistent.PersistentTasksService.PERSISTENT_TASK_ORIGIN; import static org.elasticsearch.synonyms.SynonymsManagementAPIService.SYNONYMS_ORIGIN; @@ -128,7 +128,7 @@ public static void switchUserBasedOnActionOriginAndExecute( case POST_WRITE_REFRESH_ORIGIN: securityContext.executeAsInternalUser(InternalUsers.STORAGE_USER, version, consumer); break; - case DLM_ORIGIN: + case DATA_STREAM_LIFECYCLE_ORIGIN: securityContext.executeAsInternalUser(InternalUsers.DATA_STREAM_LIFECYCLE_USER, version, consumer); break; case WATCHER_ORIGIN: diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java index c6ddef0f5193..95c487a67cfd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/OperatorPrivileges.java @@ -34,6 +34,12 @@ public class OperatorPrivileges { Setting.Property.NodeScope ); + public static boolean isOperator(ThreadContext 
threadContext) { + return AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR.equals( + threadContext.getHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY) + ); + } + public interface OperatorPrivilegesService { /** * Set a ThreadContext Header {@link AuthenticationField#PRIVILEGE_CATEGORY_KEY} if authentication @@ -126,9 +132,7 @@ public ElasticsearchSecurityException check( if (user instanceof InternalUser && false == authentication.isRunAs()) { return null; } - if (false == AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR.equals( - threadContext.getHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY) - )) { + if (false == isOperator(threadContext)) { // Only check whether request is operator-only when user is NOT an operator logger.trace("Checking operator-only violation for user [{}] and action [{}]", user, action); final OperatorPrivilegesViolation violation = operatorOnlyRegistry.check(action, request); @@ -144,9 +148,7 @@ public boolean checkRest(RestHandler restHandler, RestRequest restRequest, RestC if (false == shouldProcess()) { return true; } - if (false == AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR.equals( - threadContext.getHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY) - )) { + if (false == isOperator(threadContext)) { // Only check whether request is operator-only when user is NOT an operator if (logger.isTraceEnabled()) { Authentication authentication = threadContext.getTransient(AuthenticationField.AUTHENTICATION_KEY); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java index b48393559fc4..680f6b0f14f5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java @@ -63,11 
+63,10 @@ public RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient c new RestBuilderListener(channel) { @Override public RestResponse buildResponse(AuthenticateResponse authenticateResponse, XContentBuilder builder) throws Exception { - authenticateResponse.authentication().toXContent(builder, ToXContent.EMPTY_PARAMS); + authenticateResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); return new RestResponse(RestStatus.OK, builder); } } ); - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java index dd4b13f0d297..6f98a2b5bba0 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SettingsFilterTests.java @@ -21,7 +21,6 @@ import org.hamcrest.Matcher; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -141,7 +140,7 @@ public void testFiltering() throws Exception { List settingsFilterList = new ArrayList<>(); settingsFilterList.addAll(securityPlugin.getSettingsFilter()); // custom settings, potentially added by a plugin - SettingsModule settingsModule = new SettingsModule(settings, settingList, settingsFilterList, Collections.emptySet()); + SettingsModule settingsModule = new SettingsModule(settings, settingList, settingsFilterList); Injector injector = Guice.createInjector(settingsModule); SettingsFilter settingsFilter = injector.getInstance(SettingsFilter.class); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java index 27796b5ad37a..3a1ae84b7c68 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java @@ -34,7 +34,7 @@ public void testFailureUpgradeFrom7xWithImplicitSecuritySettings() throws Except Version.V_8_0_0, () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) ); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.CURRENT); + NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); Metadata metadata = createLicensesMetadata(previousVersion, randomFrom("basic", "trial")); @@ -68,7 +68,7 @@ public void testUpgradeFrom7xWithImplicitSecuritySettingsOnGoldPlus() throws Exc Version.V_8_0_0, () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) ); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.CURRENT); + NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); Metadata metadata = createLicensesMetadata(previousVersion, randomFrom("gold", "platinum")); @@ -86,7 +86,7 @@ public void testUpgradeFrom7xWithExplicitSecuritySettings() throws Exception { Version.V_8_0_0, () -> VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.V_8_0_0) ); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.CURRENT); + NodeMetadata nodeMetadata = 
new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); BootstrapCheck.BootstrapCheckResult result = new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, licenseService).check( @@ -100,7 +100,7 @@ public void testUpgradeFrom7xWithExplicitSecuritySettings() throws Exception { public void testUpgradeFrom8xWithImplicitSecuritySettings() throws Exception { final Version previousVersion = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.CURRENT); + NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); BootstrapCheck.BootstrapCheckResult result = new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, licenseService).check( @@ -111,7 +111,7 @@ public void testUpgradeFrom8xWithImplicitSecuritySettings() throws Exception { public void testUpgradeFrom8xWithExplicitSecuritySettings() throws Exception { final Version previousVersion = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.CURRENT); + NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(10), previousVersion, IndexVersion.current()); nodeMetadata = nodeMetadata.upgradeToCurrentVersion(); ClusterStateLicenseService licenseService = mock(ClusterStateLicenseService.class); BootstrapCheck.BootstrapCheckResult result = new SecurityImplicitBehaviorBootstrapCheck(nodeMetadata, licenseService).check( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 31ee57f971b6..2050aa725985 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -189,7 +189,7 @@ protected SSLService getSslService() { private Collection createComponentsUtil(Settings settings) throws Exception { Environment env = TestEnvironment.newEnvironment(settings); - NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(8), Version.CURRENT, IndexVersion.CURRENT); + NodeMetadata nodeMetadata = new NodeMetadata(randomAlphaOfLength(8), Version.CURRENT, IndexVersion.current()); ThreadPool threadPool = mock(ThreadPool.class); ClusterService clusterService = mock(ClusterService.class); settings = Security.additionalSettings(settings, true); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java index 641c7e69d706..574857e071df 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -20,6 +21,7 @@ import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest; import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse; import 
org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.ElasticUser; @@ -42,6 +44,8 @@ public class TransportAuthenticateActionTests extends ESTestCase { + private ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + public void testInternalUser() { SecurityContext securityContext = mock(SecurityContext.class); final Authentication authentication = AuthenticationTestHelper.builder().internal().build(); @@ -123,6 +127,13 @@ public void testValidAuthentication() { final User user = randomFrom(new ElasticUser(true), new KibanaUser(true), new User("joe")); final Authentication authentication = AuthenticationTestHelper.builder().user(user).build(); final User effectiveUser = authentication.getEffectiveSubject().getUser(); + final boolean operator = randomBoolean(); + + if (operator) { + threadContext.putHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY, AuthenticationField.PRIVILEGE_CATEGORY_VALUE_OPERATOR); + } else if (randomBoolean()) { + threadContext.putHeader(AuthenticationField.PRIVILEGE_CATEGORY_KEY, AuthenticationField.PRIVILEGE_CATEGORY_VALUE_EMPTY); + } TransportAuthenticateAction action = prepareAction(anonymousUser, effectiveUser, authentication); @@ -141,6 +152,7 @@ public void onFailure(Exception e) { }); assertThat(responseRef.get(), notNullValue()); + assertThat(responseRef.get().isOperator(), is(operator)); if (anonymousUser.enabled() && false == (authentication.isApiKey() || authentication.isCrossClusterAccess())) { // Roles of anonymousUser are added to non api key authentication final Authentication auth = responseRef.get().authentication(); @@ -210,6 +222,7 @@ private TransportAuthenticateAction prepareAction(AnonymousUser anonymousUser, U SecurityContext securityContext 
= mock(SecurityContext.class); when(securityContext.getAuthentication()).thenReturn(authentication); when(securityContext.getUser()).thenReturn(user); + when(securityContext.getThreadContext()).thenReturn(this.threadContext); TransportService transportService = new TransportService( Settings.EMPTY, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java index 3eb52c86eab3..7cfcb89e7808 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmSettingsTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.hamcrest.Matchers; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -442,12 +441,7 @@ private IllegalArgumentException assertError(String realmType, String realmName, private void validate(Settings settings) { final Set> settingsSet = new HashSet<>(InternalRealmsSettings.getSettings()); - final AbstractScopedSettings validator = new AbstractScopedSettings( - settings, - settingsSet, - Collections.emptySet(), - Setting.Property.NodeScope - ) { + final AbstractScopedSettings validator = new AbstractScopedSettings(settings, settingsSet, Setting.Property.NodeScope) { }; validator.validate(settings, false); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java index b98cb797ed7a..a8324e3e1ff3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java @@ -121,7 +121,11 @@ public void testSwitchAndExecuteSecurityProfileUser() throws Exception { } public void testSwitchWithDlmOrigin() throws Exception { - assertSwitchBasedOnOriginAndExecute(DataLifecycle.DLM_ORIGIN, InternalUsers.DATA_STREAM_LIFECYCLE_USER, randomTransportVersion()); + assertSwitchBasedOnOriginAndExecute( + DataLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN, + InternalUsers.DATA_STREAM_LIFECYCLE_USER, + randomTransportVersion() + ); } public void testSwitchAndExecuteXpackUser() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index 834ba2e46d67..22e900fc4519 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -2920,8 +2920,7 @@ private RoleProviders buildRolesProvider( }).when(nativeRolesStore).getRoleDescriptors(isASet(), anyActionListener()); } if (reservedRolesStore == null) { - reservedRolesStore = mock(ReservedRolesStore.class); - doCallRealMethod().when(reservedRolesStore).accept(anySet(), anyActionListener()); + reservedRolesStore = new ReservedRolesStore(); } if (licenseState == null) { licenseState = new XPackLicenseState(() -> 0); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index a7a6aaee32e8..238abd202b8b 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -78,7 +78,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; -import static org.elasticsearch.cluster.metadata.DataLifecycle.DLM_ORIGIN; +import static org.elasticsearch.cluster.metadata.DataLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; @@ -430,7 +430,7 @@ public void testSetUserBasedOnActionOrigin() { InternalUsers.XPACK_USER, ASYNC_SEARCH_ORIGIN, InternalUsers.ASYNC_SEARCH_USER, - DLM_ORIGIN, + DATA_STREAM_LIFECYCLE_ORIGIN, InternalUsers.DATA_STREAM_LIFECYCLE_USER ); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java index 000a8a90b14b..d0cba6b2381d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java @@ -674,8 +674,6 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th ch.flushInbound(); }).get(); testThreadPool.generic().submit(() -> ch.close().get()).get(); - assertThat(dispatchThrowableReference.get().toString(), containsString("Connection closed before received headers")); - assertThat(badDispatchInvocationCount.get(), is(6)); assertThat(authnInvocationCount.get(), 
is(0)); } } finally { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java index 308c0fb9b719..18cf0418e094 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java @@ -14,12 +14,12 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.network.NetworkService; @@ -186,7 +186,7 @@ public void testTcpHandshake() { TcpTransport originalTransport = (TcpTransport) serviceA.getOriginalTransport(); ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY); - try (TransportService service = buildService("TS_TPC", Version.CURRENT, TransportVersion.current(), Settings.EMPTY)) { + try (TransportService service = buildService("TS_TPC", VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY)) { DiscoveryNode node = new DiscoveryNode( "TS_TPC", "TS_TPC", @@ -385,7 +385,12 @@ public void testSecurityClientAuthenticationConfigs() throws Exception { String value = 
randomCapitalization(SslClientAuthenticationMode.REQUIRED); Settings settings = Settings.builder().put("xpack.security.transport.ssl.client_authentication", value).build(); try ( - MockTransportService service = buildService("TS_REQUIRED_CLIENT_AUTH", Version.CURRENT, TransportVersion.current(), settings) + MockTransportService service = buildService( + "TS_REQUIRED_CLIENT_AUTH", + VersionInformation.CURRENT, + TransportVersion.current(), + settings + ) ) { TcpTransport originalTransport = (TcpTransport) service.getOriginalTransport(); try (Transport.Connection connection2 = openConnection(serviceA, service.getLocalNode(), TestProfiles.LIGHT_PROFILE)) { @@ -398,7 +403,14 @@ public void testSecurityClientAuthenticationConfigs() throws Exception { // test no client authentication value = randomCapitalization(SslClientAuthenticationMode.NONE); settings = Settings.builder().put("xpack.security.transport.ssl.client_authentication", value).build(); - try (MockTransportService service = buildService("TS_NO_CLIENT_AUTH", Version.CURRENT, TransportVersion.current(), settings)) { + try ( + MockTransportService service = buildService( + "TS_NO_CLIENT_AUTH", + VersionInformation.CURRENT, + TransportVersion.current(), + settings + ) + ) { TcpTransport originalTransport = (TcpTransport) service.getOriginalTransport(); try (Transport.Connection connection2 = openConnection(serviceA, service.getLocalNode(), TestProfiles.LIGHT_PROFILE)) { sslEngine = getEngineFromAcceptedChannel(originalTransport, connection2); @@ -411,7 +423,12 @@ public void testSecurityClientAuthenticationConfigs() throws Exception { value = randomCapitalization(SslClientAuthenticationMode.OPTIONAL); settings = Settings.builder().put("xpack.security.transport.ssl.client_authentication", value).build(); try ( - MockTransportService service = buildService("TS_OPTIONAL_CLIENT_AUTH", Version.CURRENT, TransportVersion.current(), settings) + MockTransportService service = buildService( + "TS_OPTIONAL_CLIENT_AUTH", + 
VersionInformation.CURRENT, + TransportVersion.current(), + settings + ) ) { TcpTransport originalTransport = (TcpTransport) service.getOriginalTransport(); try (Transport.Connection connection2 = openConnection(serviceA, service.getLocalNode(), TestProfiles.LIGHT_PROFILE)) { @@ -433,7 +450,7 @@ public void testSecurityClientAuthenticationConfigs() throws Exception { try ( MockTransportService service = buildService( "TS_PROFILE_REQUIRE_CLIENT_AUTH", - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), settings ) @@ -463,7 +480,12 @@ public void testSecurityClientAuthenticationConfigs() throws Exception { .put("transport.profiles.client.xpack.security.ssl.client_authentication", value) .build(); try ( - MockTransportService service = buildService("TS_PROFILE_NO_CLIENT_AUTH", Version.CURRENT, TransportVersion.current(), settings) + MockTransportService service = buildService( + "TS_PROFILE_NO_CLIENT_AUTH", + VersionInformation.CURRENT, + TransportVersion.current(), + settings + ) ) { TcpTransport originalTransport = (TcpTransport) service.getOriginalTransport(); TransportAddress clientAddress = originalTransport.profileBoundAddresses().get("client").publishAddress(); @@ -492,7 +514,7 @@ public void testSecurityClientAuthenticationConfigs() throws Exception { try ( MockTransportService service = buildService( "TS_PROFILE_OPTIONAL_CLIENT_AUTH", - Version.CURRENT, + VersionInformation.CURRENT, TransportVersion.current(), settings ) @@ -540,7 +562,7 @@ public void testClientChannelUsesSeparateSslConfigurationForRemoteCluster() thro .put("xpack.security.remote_cluster_server.ssl.client_authentication", "none") .build(); - try (MockTransportService fcService = buildService("FC", Version.CURRENT, TransportVersion.current(), fcSettings)) { + try (MockTransportService fcService = buildService("FC", VersionInformation.CURRENT, TransportVersion.current(), fcSettings)) { final TcpTransport originalTransport = (TcpTransport) 
fcService.getOriginalTransport(); final TransportAddress remoteAccessAddress = originalTransport.profileBoundAddresses().get("_remote_cluster").publishAddress(); final DiscoveryNode node = DiscoveryNodeUtils.create( @@ -551,7 +573,7 @@ public void testClientChannelUsesSeparateSslConfigurationForRemoteCluster() thro // 1. Connection will fail because FC server certificate is not trusted by default final Settings qcSettings1 = Settings.builder().build(); - try (MockTransportService qcService = buildService("QC", Version.CURRENT, TransportVersion.current(), qcSettings1)) { + try (MockTransportService qcService = buildService("QC", VersionInformation.CURRENT, TransportVersion.current(), qcSettings1)) { final ConnectTransportException e = expectThrows( ConnectTransportException.class, () -> openConnection(qcService, node, connectionProfile) @@ -568,7 +590,7 @@ public void testClientChannelUsesSeparateSslConfigurationForRemoteCluster() thro .put("remote_cluster.tcp.keep_alive", "false") .build(); try ( - MockTransportService qcService = buildService("QC", Version.CURRENT, TransportVersion.current(), qcSettings2); + MockTransportService qcService = buildService("QC", VersionInformation.CURRENT, TransportVersion.current(), qcSettings2); Transport.Connection connection = openConnection(qcService, node, connectionProfile) ) { assertThat(connection, instanceOf(StubbableTransport.WrappedConnection.class)); @@ -604,7 +626,7 @@ public void testClientChannelUsesSeparateSslConfigurationForRemoteCluster() thro .put("remote_cluster.tcp.keep_count", 102) .build(); try ( - MockTransportService qcService = buildService("QC", Version.CURRENT, TransportVersion.current(), qcSettings3); + MockTransportService qcService = buildService("QC", VersionInformation.CURRENT, TransportVersion.current(), qcSettings3); Transport.Connection connection = openConnection(qcService, node, connectionProfile) ) { assertThat(connection, instanceOf(StubbableTransport.WrappedConnection.class)); @@ -660,7 
+682,7 @@ public void testRemoteClusterCanWorkWithoutSSL() throws Exception { .put("xpack.security.remote_cluster_server.ssl.enabled", "false") .build(); - try (MockTransportService fcService = buildService("FC", Version.CURRENT, TransportVersion.current(), fcSettings)) { + try (MockTransportService fcService = buildService("FC", VersionInformation.CURRENT, TransportVersion.current(), fcSettings)) { final TcpTransport originalTransport = (TcpTransport) fcService.getOriginalTransport(); final TransportAddress remoteAccessAddress = originalTransport.profileBoundAddresses().get("_remote_cluster").publishAddress(); final DiscoveryNode node = DiscoveryNodeUtils.create( @@ -670,7 +692,7 @@ public void testRemoteClusterCanWorkWithoutSSL() throws Exception { ); final Settings qcSettings = Settings.builder().put("xpack.security.remote_cluster_client.ssl.enabled", "false").build(); try ( - MockTransportService qcService = buildService("QC", Version.CURRENT, TransportVersion.current(), qcSettings); + MockTransportService qcService = buildService("QC", VersionInformation.CURRENT, TransportVersion.current(), qcSettings); Transport.Connection connection = openConnection(qcService, node, connectionProfile) ) { assertThat(connection, instanceOf(StubbableTransport.WrappedConnection.class)); @@ -717,7 +739,7 @@ public void testGetClientBootstrap() { .put(TransportSettings.TCP_REUSE_ADDRESS.getKey(), randomBoolean()); } final Settings qcSettings1 = builder1.build(); - try (MockTransportService qcService = buildService("QC", Version.CURRENT, TransportVersion.current(), qcSettings1)) { + try (MockTransportService qcService = buildService("QC", VersionInformation.CURRENT, TransportVersion.current(), qcSettings1)) { final var transport = (TestSecurityNetty4ServerTransport) qcService.getOriginalTransport(); // RCS remote cluster client final Bootstrap rcsBootstrap = transport.getClientBootstrap(connectionProfile); @@ -777,7 +799,7 @@ public void testGetClientBootstrap() { 
.put(RemoteClusterPortSettings.TCP_REUSE_ADDRESS.getKey(), false) .build(); - try (MockTransportService qcService = buildService("QC", Version.CURRENT, TransportVersion.current(), qcSettings2)) { + try (MockTransportService qcService = buildService("QC", VersionInformation.CURRENT, TransportVersion.current(), qcSettings2)) { final var transport = (TestSecurityNetty4ServerTransport) qcService.getOriginalTransport(); // RCS remote cluster client final Map, Object> rcsOptions = transport.getClientBootstrap(connectionProfile).config().options(); @@ -814,7 +836,7 @@ public void testGetClientBootstrap() { .put(RemoteClusterPortSettings.TCP_KEEP_INTERVAL.getKey(), 101) .put(RemoteClusterPortSettings.TCP_KEEP_COUNT.getKey(), 102) .build(); - try (MockTransportService qcService = buildService("QC", Version.CURRENT, TransportVersion.current(), qcSettings3)) { + try (MockTransportService qcService = buildService("QC", VersionInformation.CURRENT, TransportVersion.current(), qcSettings3)) { final var transport = (TestSecurityNetty4ServerTransport) qcService.getOriginalTransport(); // RCS remote cluster client final Map, Object> rcsOptions = transport.getClientBootstrap(connectionProfile).config().options(); diff --git a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java index afd7b966b550..eeb766ff70c9 100644 --- a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java +++ b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.persistent.AllocatedPersistentTask; 
import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTaskState; @@ -146,7 +147,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { taskExecutor = new TaskExecutor(client, clusterService, threadPool); return Collections.singletonList(taskExecutor); diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java index 361ff34ff388..3b9b7a696c62 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -53,7 +54,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { NodeSeenService nodeSeenService = new NodeSeenService(clusterService); diff --git a/x-pack/plugin/snapshot-based-recoveries/src/main/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerService.java b/x-pack/plugin/snapshot-based-recoveries/src/main/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerService.java index a488a491c6d0..ecbe42f960d5 
100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/main/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerService.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/main/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerService.java @@ -189,7 +189,7 @@ private boolean isSnapshotVersionCompatible(ShardSnapshot snapshot) { // same version. if (commitVersion == null) { assert SEQ_NO_SNAPSHOT_RECOVERIES_SUPPORTED_VERSION.luceneVersion().onOrAfter(snapshot.getCommitLuceneVersion()); - return IndexVersion.CURRENT.luceneVersion().onOrAfter(snapshot.getCommitLuceneVersion()); + return IndexVersion.current().luceneVersion().onOrAfter(snapshot.getCommitLuceneVersion()); } return commitVersion.onOrBefore(Version.CURRENT); } diff --git a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java index 3fad20366c27..7f355d998fcf 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java @@ -601,7 +601,7 @@ private void writeRandomDocs(Store store, int numDocs) throws IOException { } private ShardSnapshot createShardSnapshotThatDoNotShareSegmentFiles(String repoName) { - return createShardSnapshotThatDoNotShareSegmentFiles(repoName, Version.CURRENT, IndexVersion.CURRENT.luceneVersion()); + return createShardSnapshotThatDoNotShareSegmentFiles(repoName, Version.CURRENT, IndexVersion.current().luceneVersion()); } private ShardSnapshot 
createShardSnapshotThatDoNotShareSegmentFiles( @@ -630,7 +630,7 @@ private ShardSnapshot createShardSnapshotThatSharesSegmentFiles(Store store, Str ); snapshotFiles.add(fileInfo); } - return createShardSnapshot(repository, snapshotFiles, Version.CURRENT, IndexVersion.CURRENT.luceneVersion()); + return createShardSnapshot(repository, snapshotFiles, Version.CURRENT, IndexVersion.current().luceneVersion()); } private ShardSnapshot createShardSnapshot( diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java index e841979ac883..d9bf8a5a5791 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldMapperTests.java @@ -274,7 +274,7 @@ public void testShapeMapperMerge() throws Exception { public void testInvalidCurrentVersion() { MapperParsingException e = expectThrows( MapperParsingException.class, - () -> super.createMapperService(IndexVersion.CURRENT, fieldMapping((b) -> { + () -> super.createMapperService(IndexVersion.current(), fieldMapping((b) -> { b.field("type", getFieldName()).field("strategy", "recursive"); })) ); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java index 340b166aa2ea..4ae293a796a1 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java @@ -40,7 
+40,7 @@ public void testFetchSourceValue() throws IOException { ); final MappedFieldType mapper = new GeoShapeWithDocValuesFieldMapper.Builder( "field", - IndexVersion.CURRENT, + IndexVersion.current(), false, false, geoFormatterFactory @@ -101,7 +101,7 @@ public void testFetchStoredValue() throws IOException { final MappedFieldType mapper = new GeoShapeWithDocValuesFieldMapper.Builder( "field", - IndexVersion.CURRENT, + IndexVersion.current(), false, false, geoFormatterFactory @@ -145,7 +145,7 @@ private void fetchVectorTile(Geometry geometry) throws IOException { ); final MappedFieldType mapper = new GeoShapeWithDocValuesFieldMapper.Builder( "field", - IndexVersion.CURRENT, + IndexVersion.current(), false, false, geoFormatterFactory @@ -257,7 +257,7 @@ private void assertFetchSourceGeometry(Object sourceValue, String wktValue, Map< ); final MappedFieldType mapper = new GeoShapeWithDocValuesFieldMapper.Builder( "field", - IndexVersion.CURRENT, + IndexVersion.current(), false, false, geoFormatterFactory @@ -273,7 +273,7 @@ private void assertFetchStoredGeometry(String wktValue, Map json ); final MappedFieldType mapper = new GeoShapeWithDocValuesFieldMapper.Builder( "field", - IndexVersion.CURRENT, + IndexVersion.current(), false, false, geoFormatterFactory @@ -293,7 +293,7 @@ private void assertFetchSourceMVT(Object sourceValue, String mvtEquivalentAsWKT) ); final MappedFieldType mapper = new GeoShapeWithDocValuesFieldMapper.Builder( "field", - IndexVersion.CURRENT, + IndexVersion.current(), false, false, geoFormatterFactory diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java index fd84019e79ca..92309e96a660 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java +++ 
b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.spatial.index.mapper; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; @@ -15,13 +16,12 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.index.IndexVersion.CURRENT; - public class ShapeFieldTypeTests extends FieldTypeTestCase { public void testFetchSourceValue() throws IOException { - MappedFieldType mapper = new ShapeFieldMapper.Builder("field", CURRENT, false, true).build(MapperBuilderContext.root(false)) - .fieldType(); + MappedFieldType mapper = new ShapeFieldMapper.Builder("field", IndexVersion.current(), false, true).build( + MapperBuilderContext.root(false) + ).fieldType(); Map jsonLineString = Map.of("type", "LineString", "coordinates", List.of(List.of(42.0, 27.1), List.of(30.0, 50.0))); Map jsonPoint = Map.of("type", "Point", "coordinates", List.of(14.3, 15.0)); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java index ce4462520100..f25ab58a1d7b 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/ingest/CircleProcessorTests.java @@ -212,7 +212,7 @@ public void testGeoShapeQueryAcrossDateline() throws IOException { SearchExecutionContext mockedContext = mock(SearchExecutionContext.class); when(mockedContext.getFieldType(any())).thenReturn(shapeType); - when(mockedContext.indexVersionCreated()).thenReturn(IndexVersion.CURRENT); + when(mockedContext.indexVersionCreated()).thenReturn(IndexVersion.current()); Query 
sameShapeQuery = shapeType.geoShapeQuery(mockedContext, fieldName, ShapeRelation.INTERSECTS, geometry); Query pointOnDatelineQuery = shapeType.geoShapeQuery( mockedContext, diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java index 7a18a886e298..6e788c0ebcdc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.LicensedFeature; @@ -99,7 +100,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { return createComponents(client, environment.settings(), clusterService, namedWriteableRegistry); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/rate.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/rate.yml index 5b0d55b37223..ea1dd5bb9961 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/rate.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/rate.yml @@ -442,3 +442,100 @@ - close_to: { aggregations.ts.buckets.0.rate-month.value: { value: 63958020.00, error: 0.01 }} - close_to: { aggregations.ts.buckets.0.rate-quarter.value: { value: 191874060.00, error: 0.01 }} - close_to: { aggregations.ts.buckets.0.rate-year.value: { value: 767496240.00, error: 0.01 }} + +--- +"rate 
aggregation on counter field partial bucket": + - skip: + version: " - 8.6.99" + reason: "counter field support added in 8.7" + features: close_to + + - do: + indices.create: + index: test-rate + body: + settings: + index: + mode: time_series + routing_path: [ host ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + properties: + "@timestamp": + type: date + host: + type: keyword + time_series_dimension: true + bytes_counter: + type: long + time_series_metric: counter + + - do: + bulk: + refresh: true + index: test-rate + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:04.000Z", "host": "one", "bytes_counter": 1000 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:14.000Z", "host": "one", "bytes_counter": 1100 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:24.000Z", "host": "one", "bytes_counter": 1200 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:34.000Z", "host": "one", "bytes_counter": 1250 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:44.000Z", "host": "one", "bytes_counter": 1310 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:54.000Z", "host": "one", "bytes_counter": 1350 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:01.000Z", "host": "two", "bytes_counter": 1000 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:11.000Z", "host": "two", "bytes_counter": 1100 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:21.000Z", "host": "two", "bytes_counter": 1200 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:31.000Z", "host": "two", "bytes_counter": 1250 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:41.000Z", "host": "two", "bytes_counter": 1310 }' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:01:51.000Z", "host": "two", "bytes_counter": 1350 }' + + - do: + search: + index: test-rate + body: + size: 0 + query: + bool: + filter: + range: + "@timestamp": + gte: 
"2021-04-28T18:01:03.000Z" + lte: "2021-04-28T18:18:00.000Z" + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + calendar_interval: 1h + time_zone: Europe/Ljubljana + min_doc_count: 1 + aggs: + counter_rate: + time_series: + keyed: false + aggs: + bytes_counter_rate: + rate: + field: bytes_counter + unit: second + + - match: { hits.total.value: 11 } + - length: { aggregations.date_histogram.buckets: 1 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2021-04-28T20:00:00.000+02:00" } + - match: { aggregations.date_histogram.buckets.0.doc_count: 11 } + # NOTE: (1350 - 1000) / (54 - 4) = 350 / 50 = 7.0 + - close_to: { aggregations.date_histogram.buckets.0.counter_rate.buckets.0.bytes_counter_rate.value: { value: 7.00, error: 0.01 } } + # NOTE: (1350 - 1100) / (51 - 11) = 250 / 40 = 6.25 (we filter out the first sample due to the bool range filter) + - close_to: { aggregations.date_histogram.buckets.0.counter_rate.buckets.1.bytes_counter_rate.value: { value: 6.25, error: 0.01 } } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_rescore.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_rescore.yml new file mode 100644 index 000000000000..824999b3b300 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_rescore.yml @@ -0,0 +1,251 @@ +setup: + - skip: + features: headers + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + ml.put_trained_model: + model_id: ltr-model + body: > + { + "description": "super complex model for tests", + "input": {"field_names": ["cost", "product"]}, + "inference_config": { + "learn_to_rank": { + } + }, + "definition": { + "preprocessors" : [{ + "one_hot_encoding": { + "field": "product", + "hot_map": { + "TV": "type_tv", + "VCR": "type_vcr", + "Laptop": "type_laptop" + } + } + }], + "trained_model": { + "ensemble": { + "feature_names": ["cost", "type_tv", "type_vcr", "type_laptop"], + "target_type": "regression", + "trained_models": [ + { + "tree": { + "feature_names": [ + "cost" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 400, + "decision_type": "lte", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 5.0 + }, + { + "node_index": 2, + "leaf_value": 2.0 + } + ], + "target_type": "regression" + } + }, + { + "tree": { + "feature_names": [ + "type_tv" + ], + "tree_structure": [ + { + "node_index": 0, + "split_feature": 0, + "split_gain": 12, + "threshold": 1, + "decision_type": "lt", + "default_left": true, + "left_child": 1, + "right_child": 2 + }, + { + "node_index": 1, + "leaf_value": 1.0 + }, + { + "node_index": 2, + "leaf_value": 12.0 + } + ], + "target_type": "regression" + } + } + ] + } + } + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.create: + index: store + body: + mappings: + properties: + product: + type: keyword + cost: + type: integer + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + Content-Type: application/json + bulk: + index: store + refresh: true + body: | + { "index": {} } + { "product": "TV", "cost": 300 } + { "index": {} } + { "product": "TV", "cost": 400} + { "index": {} } + { "product": "TV", "cost": 600} + { "index": {} } + { "product": "VCR", "cost": 15} + { "index": {} } + { "product": "VCR", "cost": 350} + { "index": {} } + { "product": "VCR", "cost": 580} + { "index": {} } + { "product": "Laptop", "cost": 100} + { "index": {} } + { "product": "Laptop", "cost": 300} + { "index": {} } + { "product": "Laptop", "cost": 500} + +--- +"Test rescore with stored model": + - skip: + version: all + reason: "@AwaitsFix https://github.com/elastic/elasticsearch/issues/80703" + + - do: + search: + index: store + size: 3 + body: > + { + "rescore": { + "window_size": 10, + "inference": { "model_id": "ltr-model" } + } + } + - match: { hits.hits.0._score: 17.0 } + - match: { hits.hits.1._score: 17.0 } + - match: { hits.hits.2._score: 14.0 } + + - do: + search: + index: store + size: 3 + body: > + { + "query": {"term": {"product": "Laptop"}}, + "rescore": { + "window_size": 10, + "inference": { "model_id": "ltr-model" } + } + } + - match: { hits.hits.0._score: 6.0 } + - match: { hits.hits.1._score: 6.0 } + - match: { hits.hits.2._score: 3.0 } +--- +"Test rescore with stored model and smaller window_size": + - skip: + version: all + reason: "@AwaitsFix https://github.com/elastic/elasticsearch/issues/80703" + + - do: + search: + index: store + size: 5 + body: > + { + "rescore": { + "window_size": 2, + "inference": { "model_id": "ltr-model" } + } + } + - match: { hits.hits.0._score: 17.0 } + - match: { hits.hits.1._score: 17.0 } + - match: { hits.hits.2._score: 1.0 } + - match: { hits.hits.3._score: 1.0 } + - match: { hits.hits.4._score: 1.0 } +--- +"Test rescore with stored model and chained rescorers": + - skip: + version: all + reason: "@AwaitsFix https://github.com/elastic/elasticsearch/issues/80703" + + - do: + search: 
+ index: store + size: 5 + body: > + { + "rescore": [ + { + "window_size": 4, + "query": { "rescore_query":{ "script_score": {"query": {"match_all": {}}, "script": {"source": "return 4"}}}} + }, + { + "window_size": 3, + "inference": { "model_id": "ltr-model" } + }, + { + "window_size": 2, + "query": { "rescore_query": { "script_score": {"query": {"match_all": {}}, "script": {"source": "return 20"}}}} + } + ] + } + - match: { hits.hits.0._score: 37.0 } + - match: { hits.hits.1._score: 37.0 } + - match: { hits.hits.2._score: 14.0 } + - match: { hits.hits.3._score: 5.0 } + - match: { hits.hits.4._score: 1.0 } +--- +"Test rescore with missing model": + - do: + catch: missing + search: + index: store + body: > + { + "rescore": { + "window_size": 10, + "inference": { "model_id": "ltr-missing" } + } + } +--- +"Test rescore with no hits model": + - do: + search: + index: store + body: > + { + "query": {"term": {"product": "Speaker"}}, + "rescore": { + "window_size": 10, + "inference": { "model_id": "ltr-model" } + } + } + - length: { hits.hits: 0 } diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java index fb647f851139..35f651f91ccb 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; @@ -55,7 +56,8 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService 
allocationService + AllocationService allocationService, + IndicesService indicesService ) { StackTemplateRegistry templateRegistry = new StackTemplateRegistry(settings, clusterService, threadPool, client, xContentRegistry); templateRegistry.initialize(); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index 0407bdbb42c8..e0fa01a09ae5 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -37,6 +37,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.indices.AssociatedIndexDescriptor; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.PersistentTasksExecutor; @@ -240,7 +241,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { TransformConfigManager configManager = new IndexBasedTransformConfigManager( clusterService, diff --git a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java index 3a6acd5ac9e9..f615dca11499 100644 --- a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java +++ 
b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.cluster.coordination.votingonly; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -21,6 +20,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -219,7 +219,7 @@ public void testBasicSnapshotRestoreWorkFlow() { assertThat(snapshotInfos.size(), Matchers.equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), Matchers.equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.version(), Matchers.equalTo(Version.CURRENT)); + assertThat(snapshotInfo.version(), Matchers.equalTo(IndexVersion.current())); logger.info("--> close indices"); client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get(); diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java index d2f9887921d8..247e18dfb6cb 100644 --- a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java +++ b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java @@ -29,6 +29,7 @@ import 
org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ClusterCoordinationPlugin; import org.elasticsearch.plugins.NetworkPlugin; @@ -95,7 +96,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { this.threadPool.set(threadPool); return Collections.emptyList(); diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index d111e807a195..244148fa7918 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -30,7 +30,7 @@ dependencies { // watcher deps api 'com.googlecode.owasp-java-html-sanitizer:owasp-java-html-sanitizer:20211018.2' - runtimeOnly 'com.google.guava:guava:30.1-jre' // needed by watcher for the html sanitizer + runtimeOnly 'com.google.guava:guava:32.0.1-jre' // needed by watcher for the html sanitizer runtimeOnly 'com.google.guava:failureaccess:1.0.1' api 'com.sun.mail:jakarta.mail:1.6.4' api 'com.sun.activation:jakarta.activation:1.2.1' @@ -54,7 +54,6 @@ tasks.named("thirdPartyAudit").configure { 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', 'com.google.common.hash.Striped64', 'com.google.common.hash.Striped64$1', 'com.google.common.hash.Striped64$Cell', diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index aec58dc1120f..e4f3d57846e6 100644 --- 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -41,6 +41,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.Plugin; @@ -315,7 +316,8 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationService allocationService + AllocationService allocationService, + IndicesService indicesService ) { if (enabled == false) { return Collections.emptyList(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java index 94cb554b6a5f..a2205194a543 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -73,7 +73,7 @@ public void testWatcherDisabledTests() throws Exception { // also no component creation if not enabled assertThat( - watcher.createComponents(null, null, null, null, null, null, null, null, null, null, null, Tracer.NOOP, null), + watcher.createComponents(null, null, null, null, null, null, null, null, null, null, null, Tracer.NOOP, null, null), hasSize(0) ); diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldAggregationTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldAggregationTests.java index fe23bd28768e..f292f1baa567 100644 --- 
a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldAggregationTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldAggregationTests.java @@ -37,7 +37,7 @@ public class WildcardFieldAggregationTests extends AggregatorTestCase { @Before public void setup() { - WildcardFieldMapper.Builder builder = new WildcardFieldMapper.Builder(WILDCARD_FIELD_NAME, IndexVersion.CURRENT); + WildcardFieldMapper.Builder builder = new WildcardFieldMapper.Builder(WILDCARD_FIELD_NAME, IndexVersion.current()); builder.ignoreAbove(MAX_FIELD_LENGTH); wildcardFieldMapper = builder.build(MapperBuilderContext.root(false)); diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index fc46c32be37f..49f40e3cfa85 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -101,7 +101,7 @@ static SearchExecutionContext createMockSearchExecutionContext(boolean allowExpe private static final String KEYWORD_FIELD_NAME = "keyword_field"; private static final String WILDCARD_FIELD_NAME = "wildcard_field"; - public static final SearchExecutionContext MOCK_CONTEXT = createMockSearchExecutionContext(true, IndexVersion.CURRENT); + public static final SearchExecutionContext MOCK_CONTEXT = createMockSearchExecutionContext(true, IndexVersion.current()); static final int MAX_FIELD_LENGTH = 30; static WildcardFieldMapper wildcardFieldType; @@ -123,7 +123,7 @@ protected boolean supportsStoredFields() { @Override @Before public void setUp() throws Exception { - Builder builder = new WildcardFieldMapper.Builder(WILDCARD_FIELD_NAME, IndexVersion.CURRENT); + Builder builder = new 
WildcardFieldMapper.Builder(WILDCARD_FIELD_NAME, IndexVersion.current()); builder.ignoreAbove(MAX_FIELD_LENGTH); wildcardFieldType = builder.build(MapperBuilderContext.root(false)); @@ -132,7 +132,7 @@ public void setUp() throws Exception { org.elasticsearch.index.mapper.KeywordFieldMapper.Builder kwBuilder = new KeywordFieldMapper.Builder( KEYWORD_FIELD_NAME, - IndexVersion.CURRENT + IndexVersion.current() ); keywordFieldType = kwBuilder.build(MapperBuilderContext.root(false)); diff --git a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldTypeTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldTypeTests.java index 8c5d6371b6c9..58c37f22c32b 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldTypeTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldTypeTests.java @@ -18,20 +18,20 @@ public class WildcardFieldTypeTests extends FieldTypeTestCase { public void testFetchSourceValue() throws IOException { - MappedFieldType mapper = new WildcardFieldMapper.Builder("field", IndexVersion.CURRENT).build(MapperBuilderContext.root(false)) + MappedFieldType mapper = new WildcardFieldMapper.Builder("field", IndexVersion.current()).build(MapperBuilderContext.root(false)) .fieldType(); assertEquals(List.of("value"), fetchSourceValue(mapper, "value")); assertEquals(List.of("42"), fetchSourceValue(mapper, 42L)); assertEquals(List.of("true"), fetchSourceValue(mapper, true)); - MappedFieldType ignoreAboveMapper = new WildcardFieldMapper.Builder("field", IndexVersion.CURRENT).ignoreAbove(4) + MappedFieldType ignoreAboveMapper = new WildcardFieldMapper.Builder("field", IndexVersion.current()).ignoreAbove(4) .build(MapperBuilderContext.root(false)) .fieldType(); assertEquals(List.of(), fetchSourceValue(ignoreAboveMapper, "value")); assertEquals(List.of("42"), 
fetchSourceValue(ignoreAboveMapper, 42L)); assertEquals(List.of("true"), fetchSourceValue(ignoreAboveMapper, true)); - MappedFieldType nullValueMapper = new WildcardFieldMapper.Builder("field", IndexVersion.CURRENT).nullValue("NULL") + MappedFieldType nullValueMapper = new WildcardFieldMapper.Builder("field", IndexVersion.current()).nullValue("NULL") .build(MapperBuilderContext.root(false)) .fieldType(); assertEquals(List.of("NULL"), fetchSourceValue(nullValueMapper, null)); diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java index f4acc4491def..bb018372b154 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java @@ -213,7 +213,7 @@ private void beforeRestart( assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.successful")); assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.total")); assertEquals(0, (int) getResp.evaluate("snapshots.0.shards.failed")); - assertEquals(oldVersion.toString(), getResp.evaluate("snapshots.0.version")); + assertEquals(oldVersion.indexVersion.toString(), getResp.evaluate("snapshots.0.version")); // list specific snapshot on new ES getSnaps = new Request("GET", "/_snapshot/" + repoName + "/" + snapshotName); @@ -227,7 +227,7 @@ private void beforeRestart( assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.successful")); assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.total")); assertEquals(0, (int) getResp.evaluate("snapshots.0.shards.failed")); - assertEquals(oldVersion.toString(), getResp.evaluate("snapshots.0.version")); + assertEquals(oldVersion.indexVersion.toString(), getResp.evaluate("snapshots.0.version")); // list 
advanced snapshot info on new ES getSnaps = new Request("GET", "/_snapshot/" + repoName + "/" + snapshotName + "/_status");