diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy index b8d0ed2b9c43c..5524f8a6a39bb 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy @@ -147,8 +147,10 @@ class LicenseHeadersTask extends AntTask { // Generated resources substringMatcher(licenseFamilyCategory: "GEN ", licenseFamilyName: "Generated") { - // parsers generated by antlr - pattern(substring: "ANTLR GENERATED CODE") + // parsers generated by antlr + pattern(substring: "ANTLR GENERATED CODE") + // Protobuf + pattern(substring: "Generated by the protocol buffer compiler") } // Vendored Code diff --git a/gradle/forbidden-dependencies.gradle b/gradle/forbidden-dependencies.gradle index 3f6c84fa6ec05..05d5881ffc19f 100644 --- a/gradle/forbidden-dependencies.gradle +++ b/gradle/forbidden-dependencies.gradle @@ -13,7 +13,8 @@ // we do not want any of these dependencies on the compilation classpath // because they could then be used within OpenSearch List FORBIDDEN_DEPENDENCIES = [ - 'guava' + // guava was banned to keep it off the compile classpath; it is now a required runtime dependency of gRPC, so it is no longer forbidden
+ // 'guava' ] Closure checkDeps = { Configuration configuration -> diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 8ff3f6e45397d..8dbf21f440feb 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -20,13 +20,20 @@ jettison = "1.5.4" woodstox = "6.4.0" kotlin = "1.7.10" antlr4 = "4.13.1" -guava = "32.1.1-jre" -protobuf = "3.25.5" jakarta_annotation = "1.3.5" google_http_client = "1.44.1" tdigest = "3.3" hdrhistogram = "2.2.2" -grpc = "1.68.0" + +# gRPC +grpc = "1.68.0" +protobuf = "3.25.5" +guava = "32.1.1-jre" +jsr305 = "3.0.2" +failureaccess = "1.0.2" +error_prone_annotations = "2.24.1" +javax_annotations = "1.3.2" +perfmark_api = "0.26.0" # when updating the JNA version, also update the version in buildSrc/build.gradle jna = "5.13.0" diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index a8a165df637a2..ec4057bb0100a 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -65,6 +65,22 @@ dependencies { api "io.netty:netty-resolver:${versions.netty}" api "io.netty:netty-transport:${versions.netty}" api "io.netty:netty-transport-native-unix-common:${versions.netty}" + + // gRPC + compileOnly "javax.annotation:javax.annotation-api:${versions.javax_annotations}" + compileOnly "com.google.code.findbugs:jsr305:${versions.jsr305}" + runtimeOnly "com.google.guava:guava:${versions.guava}" + api "com.google.guava:failureaccess:${versions.failureaccess}" + api "com.google.errorprone:error_prone_annotations:${versions.error_prone_annotations}" + api "io.perfmark:perfmark-api:${versions.perfmark_api}" + api "io.grpc:grpc-all:${versions.grpc}" + api "io.grpc:grpc-netty:${versions.grpc}" + api "io.grpc:grpc-api:${versions.grpc}" + api "io.grpc:grpc-protobuf-lite:${versions.grpc}" + api "io.grpc:grpc-protobuf:${versions.grpc}" + api "io.grpc:grpc-stub:${versions.grpc}" + api "io.grpc:grpc-core:${versions.grpc}" + api
"io.grpc:grpc-services:${versions.grpc}" } restResources { diff --git a/modules/transport-netty4/licenses/error_prone_annotations-2.24.1.jar.sha1 b/modules/transport-netty4/licenses/error_prone_annotations-2.24.1.jar.sha1 new file mode 100644 index 0000000000000..67723f6f51248 --- /dev/null +++ b/modules/transport-netty4/licenses/error_prone_annotations-2.24.1.jar.sha1 @@ -0,0 +1 @@ +32b299e45105aa9b0df8279c74dc1edfcf313ff0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/failureaccess-1.0.2.jar.sha1 b/modules/transport-netty4/licenses/failureaccess-1.0.2.jar.sha1 new file mode 100644 index 0000000000000..e1dbdc6bf7320 --- /dev/null +++ b/modules/transport-netty4/licenses/failureaccess-1.0.2.jar.sha1 @@ -0,0 +1 @@ +c4a06a64e650562f30b7bf9aaec1bfed43aca12b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/grpc-all-1.68.0.jar.sha1 b/modules/transport-netty4/licenses/grpc-all-1.68.0.jar.sha1 new file mode 100644 index 0000000000000..e7979674ebdc8 --- /dev/null +++ b/modules/transport-netty4/licenses/grpc-all-1.68.0.jar.sha1 @@ -0,0 +1 @@ +a150a5dac0c120f8da34d59c7730d4a5fe34ec8c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/grpc-api-1.68.0.jar.sha1 b/modules/transport-netty4/licenses/grpc-api-1.68.0.jar.sha1 new file mode 100644 index 0000000000000..bf45716c5b8ce --- /dev/null +++ b/modules/transport-netty4/licenses/grpc-api-1.68.0.jar.sha1 @@ -0,0 +1 @@ +9a9f25c58d8d5b0fcf37ae889a50fec87e34ac08 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/grpc-core-1.68.0.jar.sha1 b/modules/transport-netty4/licenses/grpc-core-1.68.0.jar.sha1 new file mode 100644 index 0000000000000..94d401e4da28d --- /dev/null +++ b/modules/transport-netty4/licenses/grpc-core-1.68.0.jar.sha1 @@ -0,0 +1 @@ +e5630dfd653d7cad78caf7166e36973f55822e6c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/grpc-netty-1.68.0.jar.sha1 
b/modules/transport-netty4/licenses/grpc-netty-1.68.0.jar.sha1 new file mode 100644 index 0000000000000..584bbb0d38032 --- /dev/null +++ b/modules/transport-netty4/licenses/grpc-netty-1.68.0.jar.sha1 @@ -0,0 +1 @@ +0ac762f09db8e74f9b17fff5f7f1bcf9a13c5620 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/grpc-protobuf-1.68.0.jar.sha1 b/modules/transport-netty4/licenses/grpc-protobuf-1.68.0.jar.sha1 new file mode 100644 index 0000000000000..dcab201071c3c --- /dev/null +++ b/modules/transport-netty4/licenses/grpc-protobuf-1.68.0.jar.sha1 @@ -0,0 +1 @@ +b8c1772b35292a853f0707a3512090a8edad1fed \ No newline at end of file diff --git a/modules/transport-netty4/licenses/grpc-protobuf-lite-1.68.0.jar.sha1 b/modules/transport-netty4/licenses/grpc-protobuf-lite-1.68.0.jar.sha1 new file mode 100644 index 0000000000000..85b338166a5ab --- /dev/null +++ b/modules/transport-netty4/licenses/grpc-protobuf-lite-1.68.0.jar.sha1 @@ -0,0 +1 @@ +fe6a5349fd76e811c19f16e2b3e9453ee339df4b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/grpc-services-1.68.0.jar.sha1 b/modules/transport-netty4/licenses/grpc-services-1.68.0.jar.sha1 new file mode 100644 index 0000000000000..7cff190735825 --- /dev/null +++ b/modules/transport-netty4/licenses/grpc-services-1.68.0.jar.sha1 @@ -0,0 +1 @@ +b3aa84b0e7cbe4135d27644dd25e3f1cd4eeed85 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/grpc-stub-1.68.0.jar.sha1 b/modules/transport-netty4/licenses/grpc-stub-1.68.0.jar.sha1 new file mode 100644 index 0000000000000..0218d51268ce3 --- /dev/null +++ b/modules/transport-netty4/licenses/grpc-stub-1.68.0.jar.sha1 @@ -0,0 +1 @@ +ebd89ad550b74724b1bbe8a04203921ebac5e6d4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/guava-32.1.1-jre.jar.sha1 b/modules/transport-netty4/licenses/guava-32.1.1-jre.jar.sha1 new file mode 100644 index 0000000000000..0d791b5d3f55b --- /dev/null +++ 
b/modules/transport-netty4/licenses/guava-32.1.1-jre.jar.sha1 @@ -0,0 +1 @@ +ad575652d84153075dd41ec6177ccb15251262b2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/perfmark-api-0.26.0.jar.sha1 b/modules/transport-netty4/licenses/perfmark-api-0.26.0.jar.sha1 new file mode 100644 index 0000000000000..abf1becd13298 --- /dev/null +++ b/modules/transport-netty4/licenses/perfmark-api-0.26.0.jar.sha1 @@ -0,0 +1 @@ +ef65452adaf20bf7d12ef55913aba24037b82738 \ No newline at end of file diff --git a/modules/transport-netty4/src/main/java/org/opensearch/grpc/netty4/Netty4GrpcServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/grpc/netty4/Netty4GrpcServerTransport.java new file mode 100644 index 0000000000000..5a60cd21b38ad --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/opensearch/grpc/netty4/Netty4GrpcServerTransport.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.grpc.netty4; + +import io.grpc.BindableService; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.grpc.AbstractGrpcServerTransport; +import org.opensearch.grpc.GrpcStats; +import org.opensearch.grpc.services.GrpcServiceRegistry; +import org.opensearch.transport.NettyAllocator; +import org.opensearch.transport.SharedGroupFactory; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +import io.grpc.ForwardingServerCall; +import io.grpc.ForwardingServerCallListener; +import io.grpc.Metadata; +import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.Status; +import io.grpc.netty.NettyServerBuilder; +import io.grpc.protobuf.services.HealthStatusManager; +import io.grpc.protobuf.services.ProtoReflectionService; + +public class Netty4GrpcServerTransport extends AbstractGrpcServerTransport { + private static final Logger logger = LogManager.getLogger(Netty4GrpcServerTransport.class); + + public static final Setting SETTING_GRPC_WORKER_COUNT = Setting.intSetting("grpc.worker_count", 1, Setting.Property.NodeScope); + + private final SharedGroupFactory sharedGroupFactory; + private final GrpcServiceRegistry grpcServiceRegistry; + private final CopyOnWriteArrayList servers = new CopyOnWriteArrayList<>(); + private volatile SharedGroupFactory.SharedGroup sharedGroup; + private final ServerStatsInterceptor sharedServerStatsInterceptor; + private 
final AtomicLong currentOpen = new AtomicLong(0); + private final AtomicLong totalOpened = new AtomicLong(0); + + public Netty4GrpcServerTransport(Settings settings, NetworkService networkService, SharedGroupFactory sharedGroupFactory, GrpcServiceRegistry grpcServiceRegistry) { + super(settings, networkService); + this.sharedGroupFactory = sharedGroupFactory; + this.sharedServerStatsInterceptor = new ServerStatsInterceptor(currentOpen, totalOpened); + this.grpcServiceRegistry = grpcServiceRegistry; + } + + @Override + protected void doStart() { + boolean success = false; + try { + sharedGroup = sharedGroupFactory.getGRPCGroup(); + bindServer(); + success = true; + logger.info("Started gRPC server on port {}", port); + } finally { + if (!success) { + doStop(); + } + } + } + + @Override + protected TransportAddress bindAddress(InetAddress hostAddress, PortsRange portRange) { + AtomicReference lastException = new AtomicReference<>(); + AtomicReference addr = new AtomicReference<>(); + + boolean success = portRange.iterate(portNumber -> { + try { + InetSocketAddress address = new InetSocketAddress(hostAddress, portNumber); + NettyServerBuilder srvBuilder = NettyServerBuilder.forAddress(address) + .bossEventLoopGroup(sharedGroup.getLowLevelGroup()) + .workerEventLoopGroup(sharedGroup.getLowLevelGroup()) + .channelType(NettyAllocator.getServerChannelType()) + .intercept(this.sharedServerStatsInterceptor) + .addService(new HealthStatusManager().getHealthService()) + .addService(ProtoReflectionService.newInstance()); + + for (BindableService bService : grpcServiceRegistry.getServices()) { + srvBuilder.addService(bService); + } + + Server srv = srvBuilder.build().start(); + servers.add(srv); + addr.set(new TransportAddress(hostAddress, portNumber)); + logger.debug("Bound gRPC to address {{}}", address); + return true; + } catch (Exception e) { + lastException.set(e); + return false; + } + }); + + if (!success) { + throw new RuntimeException("Failed to bind to " + 
hostAddress + " on ports " + portRange, lastException.get()); + } + + return addr.get(); + } + + @Override + protected void doStop() { + for (Server server : servers) { + if (server != null) { + server.shutdown(); + try { + server.awaitTermination(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("Interrupted while shutting down gRPC server"); + } finally { + server.shutdownNow(); + } + } + } + + if (sharedGroup != null) { + sharedGroup.shutdown(); + sharedGroup = null; + } + } + + @Override + protected void doClose() {} + + @Override + public GrpcStats stats() { + return new GrpcStats(totalOpened.get(), currentOpen.get()); + } + + static class ServerStatsInterceptor implements ServerInterceptor { + private final AtomicLong currentOpen; + private final AtomicLong totalOpened; + + ServerStatsInterceptor(AtomicLong currentOpen, AtomicLong totalOpened) { + this.currentOpen = currentOpen; + this.totalOpened = totalOpened; + } + + @Override + public ServerCall.Listener interceptCall( + ServerCall call, + Metadata headers, + ServerCallHandler next + ) { + logger.debug("Intercepted call - Method: {}, Authority: {}, Headers: {}", + call.getMethodDescriptor().getFullMethodName(), + call.getAuthority(), + headers); + + currentOpen.incrementAndGet(); + totalOpened.incrementAndGet(); + + return new ForwardingServerCallListener.SimpleForwardingServerCallListener( + next.startCall(new ForwardingServerCall.SimpleForwardingServerCall(call) { + @Override + public void close(Status status, Metadata trailers) { + currentOpen.decrementAndGet(); + super.close(status, trailers); + } + }, headers) + ) { + }; + } + } +} diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java index e2c84ab5d339a..d5dd3e56171de 100644 ---
a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java @@ -44,6 +44,9 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.grpc.GrpcServerTransport; +import org.opensearch.grpc.netty4.Netty4GrpcServerTransport; +import org.opensearch.grpc.services.GrpcServiceRegistry; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.netty4.Netty4HttpServerTransport; import org.opensearch.http.netty4.ssl.SecureNetty4HttpServerTransport; @@ -68,6 +71,7 @@ public class Netty4ModulePlugin extends Plugin implements NetworkPlugin { public static final String NETTY_SECURE_TRANSPORT_NAME = "netty4-secure"; public static final String NETTY_HTTP_TRANSPORT_NAME = "netty4"; public static final String NETTY_SECURE_HTTP_TRANSPORT_NAME = "netty4-secure"; + public static final String NETTY_GRPC_TRANSPORT_NAME = "netty4-grpc"; private final SetOnce groupFactory = new SetOnce<>(); @@ -150,6 +154,14 @@ public Map> getHttpTransports( ); } + @Override + public Map> getGrpcTransports(Settings settings, NetworkService networkService, GrpcServiceRegistry grpcServiceRegistry) { + return Collections.singletonMap( + NETTY_GRPC_TRANSPORT_NAME, + () -> new Netty4GrpcServerTransport(settings, networkService, getSharedGroupFactory(settings), grpcServiceRegistry) + ); + } + @Override public Map> getSecureHttpTransports( Settings settings, diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java index 454293442572c..7a19e4c2b59f2 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java +++ 
b/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRefCounted; +import org.opensearch.grpc.netty4.Netty4GrpcServerTransport; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.netty4.Netty4HttpServerTransport; import org.opensearch.transport.netty4.Netty4Transport; @@ -62,14 +63,17 @@ public final class SharedGroupFactory { private final Settings settings; private final int workerCount; private final int httpWorkerCount; + private final int grpcWorkerCount; private RefCountedGroup genericGroup; private SharedGroup dedicatedHttpGroup; + private SharedGroup dedicatedGRPCGroup; public SharedGroupFactory(Settings settings) { this.settings = settings; this.workerCount = Netty4Transport.WORKER_COUNT.get(settings); this.httpWorkerCount = Netty4HttpServerTransport.SETTING_HTTP_WORKER_COUNT.get(settings); + this.grpcWorkerCount = Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT.get(settings); } public Settings getSettings() { @@ -99,6 +103,21 @@ public synchronized SharedGroup getHttpGroup() { } } + public synchronized SharedGroup getGRPCGroup() { + if (grpcWorkerCount == 0) { + return getGenericGroup(); + } else { + if (dedicatedGRPCGroup == null) { + NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup( + grpcWorkerCount, + daemonThreadFactory(settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX) + ); + dedicatedGRPCGroup = new SharedGroup(new RefCountedGroup(eventLoopGroup)); + } + return dedicatedGRPCGroup; + } + } + private SharedGroup getGenericGroup() { if (genericGroup == null) { EventLoopGroup eventLoopGroup = new NioEventLoopGroup( diff --git a/server/build.gradle b/server/build.gradle index c19e171c90f96..718fe034a0ca6 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -122,9 +122,31 @@ dependencies { api 
"io.projectreactor:reactor-core:${versions.reactor}" api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" - // protobuf + // protobuf java utils api "com.google.protobuf:protobuf-java:${versions.protobuf}" - api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" + api "com.google.protobuf:protobuf-java-util:${versions.protobuf}" + + // gRPC generated service code + api "io.grpc:grpc-protobuf-lite:${versions.grpc}" + api "io.grpc:grpc-api:${versions.grpc}" + api "io.grpc:grpc-stub:${versions.grpc}" + api "io.grpc:grpc-protobuf:${versions.grpc}" + api "io.perfmark:perfmark-api:${versions.perfmark_api}" + + // protobuf/gRPC annotations + compileOnly "javax.annotation:javax.annotation-api:${versions.javax_annotations}" + compileOnly "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" + compileOnly "com.google.code.findbugs:jsr305:${versions.jsr305}" + compileOnly "com.google.errorprone:error_prone_annotations:${versions.error_prone_annotations}" + + // Guava (explicit standard-jvm variant) + api "com.google.guava:failureaccess:${versions.failureaccess}" + api("com.google.guava:guava:${versions.guava}") { + attributes { + attribute(org.gradle.api.attributes.java.TargetJvmEnvironment.TARGET_JVM_ENVIRONMENT_ATTRIBUTE, + objects.named(org.gradle.api.attributes.java.TargetJvmEnvironment, 'standard-jvm')) + } + } // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap implementation 'org.roaringbitmap:RoaringBitmap:1.3.0' @@ -224,12 +246,19 @@ protobuf { artifact = "com.google.protobuf:protoc:${versions.protobuf}" } + plugins { + grpc { + artifact = "io.grpc:protoc-gen-grpc-java:${versions.grpc}" + } + } + generateProtoTasks { all().each { task -> task.builtins { - java { - option "annotate_code" - } + java {} + } + task.plugins { + grpc {} } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java index 544fd1fb6aaf4..2841382d64e53 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodeInfo.java @@ -43,6 +43,7 @@ import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.service.ReportingService; import org.opensearch.http.HttpInfo; +import org.opensearch.grpc.GrpcInfo; import org.opensearch.ingest.IngestInfo; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.monitor.os.OsInfo; @@ -97,6 +98,7 @@ public NodeInfo(StreamInput in) throws IOException { addInfoIfNonNull(ThreadPoolInfo.class, in.readOptionalWriteable(ThreadPoolInfo::new)); addInfoIfNonNull(TransportInfo.class, in.readOptionalWriteable(TransportInfo::new)); addInfoIfNonNull(HttpInfo.class, in.readOptionalWriteable(HttpInfo::new)); + addInfoIfNonNull(GrpcInfo.class, in.readOptionalWriteable(GrpcInfo::new)); addInfoIfNonNull(PluginsAndModules.class, in.readOptionalWriteable(PluginsAndModules::new)); addInfoIfNonNull(IngestInfo.class, in.readOptionalWriteable(IngestInfo::new)); addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new)); @@ -116,6 +118,7 @@ public NodeInfo( @Nullable ThreadPoolInfo threadPool, @Nullable TransportInfo transport, @Nullable HttpInfo http, + @Nullable GrpcInfo grpc, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest, @Nullable AggregationInfo aggsInfo, @@ -132,6 +135,7 @@ public NodeInfo( addInfoIfNonNull(ThreadPoolInfo.class, threadPool); addInfoIfNonNull(TransportInfo.class, transport); addInfoIfNonNull(HttpInfo.class, http); + addInfoIfNonNull(GrpcInfo.class, grpc); addInfoIfNonNull(PluginsAndModules.class, plugins); addInfoIfNonNull(IngestInfo.class, ingest); addInfoIfNonNull(AggregationInfo.class, aggsInfo); @@ -221,6 +225,7 @@ public void writeTo(StreamOutput out) throws IOException { 
out.writeOptionalWriteable(getInfo(ThreadPoolInfo.class)); out.writeOptionalWriteable(getInfo(TransportInfo.class)); out.writeOptionalWriteable(getInfo(HttpInfo.class)); + out.writeOptionalWriteable(getInfo(GrpcInfo.class)); out.writeOptionalWriteable(getInfo(PluginsAndModules.class)); out.writeOptionalWriteable(getInfo(IngestInfo.class)); out.writeOptionalWriteable(getInfo(AggregationInfo.class)); @@ -254,6 +259,7 @@ private Builder(Version version, Build build, DiscoveryNode node) { private ThreadPoolInfo threadPool; private TransportInfo transport; private HttpInfo http; + private GrpcInfo grpc; private PluginsAndModules plugins; private IngestInfo ingest; private AggregationInfo aggsInfo; @@ -295,6 +301,11 @@ public Builder setHttp(HttpInfo http) { return this; } + public Builder setGrpc(GrpcInfo grpc) { + this.grpc = grpc; + return this; + } + public Builder setPlugins(PluginsAndModules plugins) { this.plugins = plugins; return this; @@ -332,6 +343,7 @@ public NodeInfo build() { threadPool, transport, http, + grpc, plugins, ingest, aggsInfo, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java index 26b53e8db642f..2d40f56f36a36 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -178,6 +178,7 @@ public enum Metric { THREAD_POOL("thread_pool"), TRANSPORT("transport"), HTTP("http"), + GRPC("grpc"), PLUGINS("plugins"), INGEST("ingest"), AGGREGATIONS("aggregations"), diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java index 7ddd70185e8ad..30e8226acd58c 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/NodesInfoResponse.java @@ -43,6 +43,7 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.grpc.GrpcInfo; import org.opensearch.http.HttpInfo; import org.opensearch.ingest.IngestInfo; import org.opensearch.monitor.jvm.JvmInfo; @@ -140,6 +141,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (nodeInfo.getInfo(HttpInfo.class) != null) { nodeInfo.getInfo(HttpInfo.class).toXContent(builder, params); } + if (nodeInfo.getInfo(GrpcInfo.class) != null) { + nodeInfo.getInfo(GrpcInfo.class).toXContent(builder, params); + } if (nodeInfo.getInfo(PluginsAndModules.class) != null) { nodeInfo.getInfo(PluginsAndModules.class).toXContent(builder, params); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index dda54cce334ec..bab11cb55fed2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -114,6 +114,7 @@ protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) { metrics.contains(NodesInfoRequest.Metric.THREAD_POOL.metricName()), metrics.contains(NodesInfoRequest.Metric.TRANSPORT.metricName()), metrics.contains(NodesInfoRequest.Metric.HTTP.metricName()), + metrics.contains(NodesInfoRequest.Metric.GRPC.metricName()), metrics.contains(NodesInfoRequest.Metric.PLUGINS.metricName()), metrics.contains(NodesInfoRequest.Metric.INGEST.metricName()), metrics.contains(NodesInfoRequest.Metric.AGGREGATIONS.metricName()), diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index eb79e3403a25c..068a8c160f076 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -46,6 +46,7 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.discovery.DiscoveryStats; +import org.opensearch.grpc.GrpcStats; import org.opensearch.http.HttpStats; import org.opensearch.index.SegmentReplicationRejectionStats; import org.opensearch.index.stats.IndexingPressureStats; @@ -106,6 +107,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private HttpStats http; + @Nullable + private GrpcStats grpc; + @Nullable private AllCircuitBreakerStats breaker; @@ -179,6 +183,7 @@ public NodeStats(StreamInput in) throws IOException { fs = in.readOptionalWriteable(FsInfo::new); transport = in.readOptionalWriteable(TransportStats::new); http = in.readOptionalWriteable(HttpStats::new); + grpc = in.readOptionalWriteable(GrpcStats::new); breaker = in.readOptionalWriteable(AllCircuitBreakerStats::new); scriptStats = in.readOptionalWriteable(ScriptStats::new); discoveryStats = in.readOptionalWriteable(DiscoveryStats::new); @@ -265,6 +270,7 @@ public NodeStats( @Nullable FsInfo fs, @Nullable TransportStats transport, @Nullable HttpStats http, + @Nullable GrpcStats grpc, @Nullable AllCircuitBreakerStats breaker, @Nullable ScriptStats scriptStats, @Nullable DiscoveryStats discoveryStats, @@ -296,6 +302,7 @@ public NodeStats( this.fs = fs; this.transport = transport; this.http = http; + this.grpc = grpc; this.breaker = breaker; this.scriptStats = scriptStats; this.discoveryStats = discoveryStats; @@ -385,6 +392,11 @@ public HttpStats getHttp() { return this.http; } + 
@Nullable + public GrpcStats getGrpc() { + return this.grpc; + } + @Nullable public AllCircuitBreakerStats getBreaker() { return this.breaker; @@ -500,6 +512,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(fs); out.writeOptionalWriteable(transport); out.writeOptionalWriteable(http); + out.writeOptionalWriteable(grpc); out.writeOptionalWriteable(breaker); out.writeOptionalWriteable(scriptStats); out.writeOptionalWriteable(discoveryStats); @@ -592,6 +605,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getHttp() != null) { getHttp().toXContent(builder, params); } + if (getGrpc() != null) { + getGrpc().toXContent(builder, params); + } if (getBreaker() != null) { getBreaker().toXContent(builder, params); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index a5b00ed82d3cb..153d84b53d2c5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -202,6 +202,7 @@ public enum Metric { FS("fs"), TRANSPORT("transport"), HTTP("http"), + GRPC("grpc"), BREAKER("breaker"), SCRIPT("script"), DISCOVERY("discovery"), diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index a98d245af872b..9d8791589e6f3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -111,6 +111,7 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { 
NodesStatsRequest.Metric.FS.containedIn(metrics), NodesStatsRequest.Metric.TRANSPORT.containedIn(metrics), NodesStatsRequest.Metric.HTTP.containedIn(metrics), + NodesStatsRequest.Metric.GRPC.containedIn(metrics), NodesStatsRequest.Metric.BREAKER.containedIn(metrics), NodesStatsRequest.Metric.SCRIPT.containedIn(metrics), NodesStatsRequest.Metric.DISCOVERY.containedIn(metrics), diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index c6581b99eb559..17c32e0fcd31f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -169,7 +169,7 @@ protected ClusterStatsNodeResponse newNodeResponse(StreamInput in) throws IOExce @Override protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) { - NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false, false, false); + NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, false, true, false, false, false, false); NodeStats nodeStats = nodeService.stats( CommonStatsFlags.NONE, isMetricRequired(Metric.OS, nodeRequest.request), @@ -182,6 +182,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, isMetricRequired(Metric.INGEST, nodeRequest.request), false, false, diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 53b1d990f9a0c..4e0971127224b 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -36,7 +36,9 @@ import org.opensearch.common.SuppressForbidden; import 
org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.env.Environment; +import org.opensearch.grpc.GrpcTransportSettings; import org.opensearch.http.HttpTransportSettings; import org.opensearch.plugins.PluginInfo; import org.opensearch.plugins.PluginsService; @@ -69,6 +71,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import static org.opensearch.common.util.FeatureFlags.GRPC_ENABLE_SETTING; import static org.opensearch.bootstrap.FilePermissionUtils.addDirectoryPath; import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath; @@ -402,6 +405,10 @@ static void addFilePermissions(Permissions policy, Environment environment) thro private static void addBindPermissions(Permissions policy, Settings settings) { addSocketPermissionForHttp(policy, settings); addSocketPermissionForTransportProfiles(policy, settings); + + if (FeatureFlags.isEnabled(GRPC_ENABLE_SETTING)) { + addSocketPermissionForGrpc(policy, settings); + } } /** @@ -416,6 +423,17 @@ private static void addSocketPermissionForHttp(final Permissions policy, final S addSocketPermissionForPortRange(policy, httpRange); } + /** + * Add dynamic {@link SocketPermission} based on gRPC settings. + * + * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to. + * @param settings the {@link Settings} instance to read the gRPC settings from + */ + private static void addSocketPermissionForGrpc(final Permissions policy, final Settings settings) { + final String grpcRange = GrpcTransportSettings.SETTING_GRPC_PORT.get(settings).getPortRangeString(); + addSocketPermissionForPortRange(policy, grpcRange); + } + /** * Add dynamic {@link SocketPermission} based on transport settings. This method will first check if there is a port range specified in * the transport profile specified by {@code profileSettings} and will fall back to {@code settings}. 
diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index bb8da190a6f35..d0a21905a2ccb 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -52,6 +52,8 @@ import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.grpc.GrpcServerTransport; +import org.opensearch.grpc.services.GrpcServiceRegistry; import org.opensearch.http.HttpServerTransport; import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.plugins.NetworkPlugin; @@ -157,6 +159,7 @@ public final class NetworkModule { private final Map> transportFactories = new HashMap<>(); private final Map> transportHttpFactories = new HashMap<>(); + private final Map> transportGrpcFactories = new HashMap<>(); private final List transportInterceptors = new ArrayList<>(); /** @@ -172,6 +175,7 @@ public NetworkModule( CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, NamedXContentRegistry xContentRegistry, + GrpcServiceRegistry grpcServiceRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, ClusterSettings clusterSettings, @@ -222,6 +226,15 @@ public NetworkModule( registerHttpTransport(entry.getKey(), entry.getValue()); } + Map> GrpcTransportFactory = plugin.getGrpcTransports( + settings, + networkService, + grpcServiceRegistry + ); + for (Map.Entry> entry : GrpcTransportFactory.entrySet()) { + registerGrpcTransport(entry.getKey(), entry.getValue()); + } + Map> transportFactory = plugin.getTransports( settings, threadPool, @@ -305,6 +318,12 @@ private void registerHttpTransport(String key, Supplier fac } } + private void registerGrpcTransport(String key, Supplier factory) { 
+ if (transportGrpcFactories.putIfAbsent(key, factory) != null) { + throw new IllegalArgumentException("transport for name: " + key + " is already registered"); + } + } + /** * Register an allocation command. *

@@ -346,6 +365,16 @@ public Supplier getHttpServerTransportSupplier() { return factory; } + public Supplier getGrpcServerTransportSupplier() { + // TODO: Hacking in gRPC key from Netty4ModulePlugin + final String name = "netty4-grpc"; + final Supplier factory = transportGrpcFactories.get(name); + if (factory == null) { + throw new IllegalStateException("Unsupported grpc.type [" + name + "]"); + } + return factory; + } + public Supplier getTransportSupplier() { final String name; if (TRANSPORT_TYPE_SETTING.exists(settings)) { diff --git a/server/src/main/java/org/opensearch/common/network/NetworkService.java b/server/src/main/java/org/opensearch/common/network/NetworkService.java index deec184f702bf..9ca9a0cdff018 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkService.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkService.java @@ -34,8 +34,11 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.transport.BindTransportException; import java.io.IOException; import java.net.InetAddress; @@ -45,9 +48,13 @@ import java.util.HashSet; import java.util.List; import java.util.Objects; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Function; +import static org.opensearch.grpc.GrpcTransportSettings.SETTING_GRPC_PORT; +import static org.opensearch.grpc.GrpcTransportSettings.SETTING_GRPC_PUBLISH_PORT; + /** * Core network service. * @@ -237,6 +244,43 @@ public InetAddress resolvePublishHostAddresses(String publishHosts[]) throws IOE return addresses[0]; } + /** + * Resolve the publishPort for a server provided a list of boundAddresses and a publishInetAddress. 
+ * Resolution strategy is as follows: + * If a configured port exists resolve to that port. + * If a bound address matches the publishInetAddress resolve to that port. + * If a bound address is a wildcard address resolve to that port. + * If all bound addresses share the same port resolve to that port. + * + * @param publishPort -1 if no configured publish port exists + * @param boundAddresses addresses bound by the server + * @param publishInetAddress address published for the server + * @return Resolved port. If publishPort is negative and no port can be resolved return publishPort. + */ + public static int resolvePublishPort(int publishPort, List boundAddresses, InetAddress publishInetAddress) { + if (publishPort < 0) { + for (TransportAddress boundAddress : boundAddresses) { + InetAddress boundInetAddress = boundAddress.address().getAddress(); + if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { + publishPort = boundAddress.getPort(); + break; + } + } + } + + if (publishPort < 0) { + final Set ports = new HashSet<>(); + for (TransportAddress boundAddress : boundAddresses) { + ports.add(boundAddress.getPort()); + } + if (ports.size() == 1) { + publishPort = ports.iterator().next(); + } + } + + return publishPort; + } + /** resolves (and deduplicates) host specification */ private InetAddress[] resolveInetAddresses(String hosts[]) throws IOException { if (hosts.length == 0) { diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 6df68013a8119..78744cb47d18f 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -128,6 +128,16 @@ public class FeatureFlags { Property.NodeScope ); + /** + * Gates the functionality of the Netty4 gRPC server. 
+ */ + public static final String GRPC_EXPERIMENTAL = "opensearch.experimental.feature.grpc.enabled"; + public static final Setting GRPC_ENABLE_SETTING = Setting.boolSetting( + GRPC_EXPERIMENTAL, + true, + Property.NodeScope + ); + private static final List> ALL_FEATURE_FLAG_SETTINGS = List.of( REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, EXTENSIONS_SETTING, @@ -138,7 +148,8 @@ public class FeatureFlags { STAR_TREE_INDEX_SETTING, APPLICATION_BASED_CONFIGURATION_TEMPLATES_SETTING, READER_WRITER_SPLIT_EXPERIMENTAL_SETTING, - TERM_VERSION_PRECOMMIT_ENABLE_SETTING + TERM_VERSION_PRECOMMIT_ENABLE_SETTING, + GRPC_ENABLE_SETTING ); /** diff --git a/server/src/main/java/org/opensearch/extensions/action/RegisterTransportActionsRequest.java b/server/src/main/java/org/opensearch/extensions/action/RegisterTransportActionsRequest.java index 7e559f9b948d3..a60a2d3dc1ec3 100644 --- a/server/src/main/java/org/opensearch/extensions/action/RegisterTransportActionsRequest.java +++ b/server/src/main/java/org/opensearch/extensions/action/RegisterTransportActionsRequest.java @@ -11,7 +11,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.extensions.proto.ExtensionIdentityProto.ExtensionIdentity; -import org.opensearch.extensions.proto.RegisterTransportActionsProto.RegisterTransportActions; +import org.opensearch.extensions.proto.RegisterTransportActionsProto; import org.opensearch.transport.TransportRequest; import java.io.IOException; @@ -25,16 +25,16 @@ * @opensearch.internal */ public class RegisterTransportActionsRequest extends TransportRequest { - private final RegisterTransportActions request; + private final RegisterTransportActionsProto.RegisterTransportActions request; public RegisterTransportActionsRequest(String uniqueId, Set transportActions) { ExtensionIdentity identity = ExtensionIdentity.newBuilder().setUniqueId(uniqueId).build(); - this.request = 
RegisterTransportActions.newBuilder().setIdentity(identity).addAllTransportActions(transportActions).build(); + this.request = RegisterTransportActionsProto.RegisterTransportActions.newBuilder().setIdentity(identity).addAllTransportActions(transportActions).build(); } public RegisterTransportActionsRequest(StreamInput in) throws IOException { super(in); - this.request = RegisterTransportActions.parseFrom(in.readByteArray()); + this.request = RegisterTransportActionsProto.RegisterTransportActions.parseFrom(in.readByteArray()); } public String getUniqueId() { diff --git a/server/src/main/java/org/opensearch/grpc/AbstractGrpcServerTransport.java b/server/src/main/java/org/opensearch/grpc/AbstractGrpcServerTransport.java new file mode 100644 index 0000000000000..e4eed9d3eed3d --- /dev/null +++ b/server/src/main/java/org/opensearch/grpc/AbstractGrpcServerTransport.java @@ -0,0 +1,132 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.grpc; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.transport.BindTransportException; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.opensearch.common.network.NetworkService.resolvePublishPort; + +import static org.opensearch.grpc.GrpcTransportSettings.SETTING_GRPC_BIND_HOST; +import static org.opensearch.grpc.GrpcTransportSettings.SETTING_GRPC_MAX_CONTENT_LENGTH; +import static org.opensearch.grpc.GrpcTransportSettings.SETTING_GRPC_PORT; +import static org.opensearch.grpc.GrpcTransportSettings.SETTING_GRPC_PUBLISH_HOST; +import static org.opensearch.grpc.GrpcTransportSettings.SETTING_GRPC_PUBLISH_PORT; + +/** + * Base GrpcServer class + * + * @opensearch.internal + */ +public abstract class AbstractGrpcServerTransport extends AbstractLifecycleComponent implements GrpcServerTransport { + private static final Logger logger = LogManager.getLogger(AbstractGrpcServerTransport.class); + + private volatile BoundTransportAddress boundAddress; + + private final String[] bindHosts; + private final String[] publishHosts; + private final Settings settings; + private final NetworkService networkService; + + protected final PortsRange port; + protected final ByteSizeValue maxContentLength; + + protected AbstractGrpcServerTransport( + Settings settings, + NetworkService networkService + ) { + 
this.settings = settings; +        this.networkService = networkService; + +        List grpcBindHost = SETTING_GRPC_BIND_HOST.get(settings); +        this.bindHosts = (grpcBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : grpcBindHost).toArray( +            Strings.EMPTY_ARRAY +        ); + +        List grpcPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings); +        this.publishHosts = (grpcPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : grpcPublishHost) +            .toArray(Strings.EMPTY_ARRAY); + +        this.port = SETTING_GRPC_PORT.get(settings); +        this.maxContentLength = SETTING_GRPC_MAX_CONTENT_LENGTH.get(settings); +    } + +    @Override +    public BoundTransportAddress boundAddress() { +        return this.boundAddress; +    } + +    @Override +    public GrpcInfo info() { +        BoundTransportAddress boundTransportAddress = boundAddress(); +        if (boundTransportAddress == null) { +            return null; +        } +        return new GrpcInfo(boundTransportAddress, maxContentLength.getBytes()); +    } + +    // gRPC service definitions provided at bind +    abstract protected TransportAddress bindAddress(InetAddress hostAddress, PortsRange portRange); + +    protected void bindServer() { +        InetAddress[] hostAddresses; +        try { +            hostAddresses = networkService.resolveBindHostAddresses(bindHosts); +        } catch (IOException e) { +            throw new BindTransportException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e); +        } + +        List boundAddresses = new ArrayList<>(hostAddresses.length); +        for (InetAddress address : hostAddresses) { +            boundAddresses.add(bindAddress(address, port)); +        } + +        final InetAddress publishInetAddress; +        try { +            publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts); +        } catch (Exception e) { +            throw new BindTransportException("Failed to resolve publish address", e); +        } + +        final int publishPort = resolvePublishPort(SETTING_GRPC_PUBLISH_PORT.get(settings), boundAddresses, publishInetAddress); +        if (publishPort < 0) { +            throw new BindTransportException( + 
"Failed to auto-resolve http publish port, multiple bound addresses " + + boundAddresses + + " with distinct ports and none of them matched the publish address (" + + publishInetAddress + + "). " + + "Please specify a unique port by setting " + + SETTING_GRPC_PORT.getKey() + + " or " + + SETTING_GRPC_PUBLISH_PORT.getKey() + ); + } + + TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); + this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress); + logger.info("{}", boundAddress); + } +} diff --git a/server/src/main/java/org/opensearch/grpc/GrpcInfo.java b/server/src/main/java/org/opensearch/grpc/GrpcInfo.java new file mode 100644 index 0000000000000..0a847a1bb6f26 --- /dev/null +++ b/server/src/main/java/org/opensearch/grpc/GrpcInfo.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.grpc; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.network.InetAddresses; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.service.ReportingService; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Information about a grpc connection + * + * @opensearch.api + */ +@ExperimentalApi +public class GrpcInfo implements ReportingService.Info { + private final BoundTransportAddress address; + private final long maxContentLength; + + public GrpcInfo(StreamInput in) throws IOException { + this(new BoundTransportAddress(in), in.readLong()); + } + + public GrpcInfo(BoundTransportAddress address, long maxContentLength) { + this.address = address; + this.maxContentLength = maxContentLength; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + address.writeTo(out); + out.writeLong(maxContentLength); + } + + static final class Fields { + static final String GRPC = "grpc"; + static final String BOUND_ADDRESS = "bound_address"; + static final String PUBLISH_ADDRESS = "publish_address"; + static final String MAX_CONTENT_LENGTH = "max_content_length"; + static final String MAX_CONTENT_LENGTH_IN_BYTES = "max_content_length_in_bytes"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.GRPC); + builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses()); + TransportAddress publishAddress = address.publishAddress(); + String publishAddressString = publishAddress.toString(); + String hostString = publishAddress.address().getHostString(); + if 
(InetAddresses.isInetAddress(hostString) == false) { + publishAddressString = hostString + '/' + publishAddress.toString(); + } + builder.field(Fields.PUBLISH_ADDRESS, publishAddressString); + builder.humanReadableField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength()); + builder.endObject(); + return builder; + } + + public BoundTransportAddress address() { + return address; + } + + public BoundTransportAddress getAddress() { + return address(); + } + + public ByteSizeValue maxContentLength() { + return new ByteSizeValue(maxContentLength); + } + + public ByteSizeValue getMaxContentLength() { + return maxContentLength(); + } +} diff --git a/server/src/main/java/org/opensearch/grpc/GrpcServerTransport.java b/server/src/main/java/org/opensearch/grpc/GrpcServerTransport.java new file mode 100644 index 0000000000000..bffc876bcd8ce --- /dev/null +++ b/server/src/main/java/org/opensearch/grpc/GrpcServerTransport.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.grpc; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lifecycle.LifecycleComponent; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.service.ReportingService; + +/** + * gRPC Transport server + * + * @opensearch.api + */ +@PublicApi(since = "3.0.0") +public interface GrpcServerTransport extends LifecycleComponent, ReportingService { + BoundTransportAddress boundAddress(); + GrpcInfo info(); + GrpcStats stats(); +} diff --git a/server/src/main/java/org/opensearch/grpc/GrpcStats.java b/server/src/main/java/org/opensearch/grpc/GrpcStats.java new file mode 100644 index 0000000000000..4693804f79535 --- /dev/null +++ b/server/src/main/java/org/opensearch/grpc/GrpcStats.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.grpc; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Stats for gRPC connections + * + * @opensearch.api + */ +@PublicApi(since = "3.0.0") +public class GrpcStats implements Writeable, ToXContentFragment { + private final long totalRequestCount; + private final long totalActiveConnections; + + public GrpcStats(long totalRequestCount, long totalActiveConnections) { + this.totalRequestCount = totalRequestCount; + this.totalActiveConnections = totalActiveConnections; + } + + public GrpcStats(StreamInput in) throws IOException { + totalRequestCount = in.readVLong(); + totalActiveConnections = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(totalRequestCount); + out.writeVLong(totalActiveConnections); + } + + static final class Fields { + static final String GRPC = "grpc"; + static final String CURRENT_OPEN = "current_open"; + static final String TOTAL_OPENED = "total_opened"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(GrpcStats.Fields.GRPC); + builder.field(GrpcStats.Fields.CURRENT_OPEN, totalActiveConnections); + builder.field(GrpcStats.Fields.TOTAL_OPENED, totalRequestCount); + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/grpc/GrpcTransportSettings.java b/server/src/main/java/org/opensearch/grpc/GrpcTransportSettings.java new file mode 100644 index 0000000000000..0645d39e9214a --- /dev/null +++ b/server/src/main/java/org/opensearch/grpc/GrpcTransportSettings.java @@ -0,0 +1,70 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.grpc; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.transport.PortsRange; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; + +import java.util.List; +import java.util.function.Function; + +import static java.util.Collections.emptyList; +import static org.opensearch.common.settings.Setting.intSetting; +import static org.opensearch.common.settings.Setting.listSetting; + +/** + * Transport settings for gRPC connections + * + * @opensearch.internal + */ +public final class GrpcTransportSettings { + + public static final Setting SETTING_GRPC_PORT = new Setting<>( + "grpc.port", + "9400-9500", + PortsRange::new, + Property.NodeScope + ); + + public static final Setting SETTING_GRPC_PUBLISH_PORT = intSetting("grpc.publish_port", -1, -1, Property.NodeScope); + + public static final Setting> SETTING_GRPC_BIND_HOST = listSetting( + "grpc.bind_host", + List.of("0.0.0.0"), + Function.identity(), + Property.NodeScope + ); + + public static final Setting> SETTING_GRPC_HOST = listSetting( + "grpc.host", + emptyList(), + Function.identity(), + Property.NodeScope + ); + + public static final Setting> SETTING_GRPC_PUBLISH_HOST = listSetting( + "grpc.publish_host", + SETTING_GRPC_HOST, + Function.identity(), + Property.NodeScope + ); + + public static final Setting SETTING_GRPC_MAX_CONTENT_LENGTH = Setting.byteSizeSetting( + "grpc.max_content_length", + new ByteSizeValue(100, ByteSizeUnit.MB), + new ByteSizeValue(0, ByteSizeUnit.BYTES), + new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES), + Property.NodeScope + ); + + private GrpcTransportSettings() {} +} diff --git 
a/server/src/main/java/org/opensearch/grpc/services/DocumentServiceImpl.java b/server/src/main/java/org/opensearch/grpc/services/DocumentServiceImpl.java new file mode 100644 index 0000000000000..d0f81978b7adb --- /dev/null +++ b/server/src/main/java/org/opensearch/grpc/services/DocumentServiceImpl.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.grpc.services; + +import io.grpc.stub.StreamObserver; +import opensearch.protos.BulkRequest; +import opensearch.protos.BulkResponse; +import opensearch.protos.DeleteDocumentRequest; +import opensearch.protos.DeleteDocumentResponse; +import opensearch.protos.GetDocumentRequest; +import opensearch.protos.GetDocumentResponse; +import opensearch.protos.GetDocumentSourceRequest; +import opensearch.protos.GetDocumentSourceResponse; +import opensearch.protos.IndexBulkRequest; +import opensearch.protos.IndexBulkResponse; +import opensearch.protos.IndexDocumentCreateIdRequest; +import opensearch.protos.IndexDocumentCreateIdResponse; +import opensearch.protos.IndexDocumentIdRequest; +import opensearch.protos.IndexDocumentIdResponse; +import opensearch.protos.IndexDocumentRequest; +import opensearch.protos.IndexDocumentResponse; +import opensearch.protos.UpdateDocumentRequest; +import opensearch.protos.UpdateDocumentResponse; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.client.node.NodeClient; + +import opensearch.proto.services.DocumentServiceGrpc; + +public class DocumentServiceImpl extends DocumentServiceGrpc.DocumentServiceImplBase { + private static final Logger logger = LogManager.getLogger(DocumentServiceImpl.class); + private final NodeClient client; + + public DocumentServiceImpl(NodeClient client) { + this.client = client; + } + + @Override + public void 
indexDocument(IndexDocumentRequest request, StreamObserver responseObserver) { + super.indexDocument(request, responseObserver); + } + + @Override + public void indexDocumentId(IndexDocumentIdRequest request, StreamObserver responseObserver) { + super.indexDocumentId(request, responseObserver); + } + + @Override + public void indexDocumentCreateId(IndexDocumentCreateIdRequest request, StreamObserver responseObserver) { + super.indexDocumentCreateId(request, responseObserver); + } + + @Override + public void bulk(BulkRequest request, StreamObserver responseObserver) { + super.bulk(request, responseObserver); + } + + @Override + public void indexBulk(IndexBulkRequest request, StreamObserver responseObserver) { + super.indexBulk(request, responseObserver); + } + + @Override + public void deleteDocument(DeleteDocumentRequest request, StreamObserver responseObserver) { + super.deleteDocument(request, responseObserver); + } + + @Override + public void updateDocument(UpdateDocumentRequest request, StreamObserver responseObserver) { + super.updateDocument(request, responseObserver); + } + + @Override + public void getDocument(GetDocumentRequest request, StreamObserver responseObserver) { + super.getDocument(request, responseObserver); + } + + @Override + public void getDocumentSource(GetDocumentSourceRequest request, StreamObserver responseObserver) { + super.getDocumentSource(request, responseObserver); + } +} diff --git a/server/src/main/java/org/opensearch/grpc/services/GrpcServiceRegistry.java b/server/src/main/java/org/opensearch/grpc/services/GrpcServiceRegistry.java new file mode 100644 index 0000000000000..1ee45815fe511 --- /dev/null +++ b/server/src/main/java/org/opensearch/grpc/services/GrpcServiceRegistry.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.grpc.services; + +import io.grpc.BindableService; +import java.util.List; + +/* +TODO: Service validation? +TODO: Handle compatibility/errors/dups here before we inject services into gRPC server + */ +public class GrpcServiceRegistry { + + private final List services; + + public GrpcServiceRegistry(BindableService... services) { + this.services = List.of(services); + } + + public List getServices() { + return services; + } +} diff --git a/server/src/main/java/org/opensearch/grpc/services/NodesInfoServiceImpl.java b/server/src/main/java/org/opensearch/grpc/services/NodesInfoServiceImpl.java new file mode 100644 index 0000000000000..086d4b1455d3c --- /dev/null +++ b/server/src/main/java/org/opensearch/grpc/services/NodesInfoServiceImpl.java @@ -0,0 +1,137 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.grpc.services; + +import io.grpc.stub.StreamObserver; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.action.admin.cluster.node.info.proto.NodesInfoProto; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.core.service.ReportingService; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.grpc.GrpcInfo; +import org.opensearch.http.HttpInfo; +import org.opensearch.ingest.IngestInfo; +import org.opensearch.monitor.jvm.JvmInfo; +import org.opensearch.monitor.os.OsInfo; +import org.opensearch.monitor.process.ProcessInfo; +import org.opensearch.search.aggregations.support.AggregationInfo; +import org.opensearch.search.pipeline.SearchPipelineInfo; +import org.opensearch.threadpool.ThreadPoolInfo; +import org.opensearch.transport.TransportInfo; + +import java.io.IOException; +import java.util.Map; +import java.util.Optional; + +import opensearch.proto.services.NodesInfoServiceGrpc; + +import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; + +public class NodesInfoServiceImpl extends NodesInfoServiceGrpc.NodesInfoServiceImplBase { + private static final Logger logger = LogManager.getLogger(NodesInfoServiceImpl.class); + private final NodeClient client; + + public NodesInfoServiceImpl(NodeClient client) { + this.client = client; + } + + @Override + public void nodesInfo(NodesInfoProto.NodesInfoRequestProto request, StreamObserver responseObserver) { + NodesInfoResponse response = client.admin().cluster() + .nodesInfo(reqFromProto(request)) + 
.actionGet(); + responseObserver.onNext(respToProto(response)); + responseObserver.onCompleted(); + } + + private static NodesInfoRequest reqFromProto(NodesInfoProto.NodesInfoRequestProto request) { + String[] nodeIds = request.getNodeIdsList().toArray(new String[0]); + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds); + nodesInfoRequest.timeout(request.getTimeout()); + + if (request.getMetrics().getAll()) { + nodesInfoRequest.all(); + } else { + for (String met : request.getMetrics().getMetricList().getStrListList()) { + nodesInfoRequest.addMetric(met); + } + } + + return nodesInfoRequest; + } + + private static NodesInfoProto.NodesInfoResponseProto respToProto(NodesInfoResponse response) { + NodesInfoProto.NodesInfoResponseProto.Builder builder = NodesInfoProto.NodesInfoResponseProto.newBuilder(); + + for (NodeInfo ni : response.getNodes()) { + NodesInfoProto.NodesInfo.Builder nib = NodesInfoProto.NodesInfo.newBuilder() + .setNodeId(ni.getNode().getId()) + .setNodeName(ni.getNode().getName()) + .setTransport(ni.getNode().getAddress().toString()) + .setHost(ni.getNode().getHostName()) + .setIp(ni.getNode().getHostAddress()) + .setIp(ni.getVersion().toString()) + .setBuildType(ni.getBuild().type().displayName()) + .setBuildType(ni.getBuild().hash()); + + if (ni.getTotalIndexingBuffer() != null) { + nib.setTotalIndexingBuffer(ni.getTotalIndexingBuffer().toString()); + } + + for (DiscoveryNodeRole role : ni.getNode().getRoles()) { + nib.addRoles(role.roleName()); + } + + for (Map.Entry entry : ni.getNode().getAttributes().entrySet()) { + nib.putAttributes(entry.getKey(), entry.getValue()); + } + + try { + XContentBuilder settingsBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + String settingsXCont = ni.getSettings().toXContent(settingsBuilder, EMPTY_PARAMS).toString(); + nib.setSettings(settingsXCont); + } catch (IOException e) { + logger.debug("Failed to build NodesInfo settings: " + e); + } + + 
Optional.ofNullable(getNodeInfoJSON(ni, OsInfo.class)).ifPresent(nib::setOs); + Optional.ofNullable(getNodeInfoJSON(ni, ProcessInfo.class)).ifPresent(nib::setProcess); + Optional.ofNullable(getNodeInfoJSON(ni, JvmInfo.class)).ifPresent(nib::setJvm); + Optional.ofNullable(getNodeInfoJSON(ni, ThreadPoolInfo.class)).ifPresent(nib::setThreadPool); + Optional.ofNullable(getNodeInfoJSON(ni, TransportInfo.class)).ifPresent(nib::setTransport); + Optional.ofNullable(getNodeInfoJSON(ni, HttpInfo.class)).ifPresent(nib::setHttp); + Optional.ofNullable(getNodeInfoJSON(ni, GrpcInfo.class)).ifPresent(nib::setGrpc); + Optional.ofNullable(getNodeInfoJSON(ni, PluginsAndModules.class)).ifPresent(nib::setPlugins); + Optional.ofNullable(getNodeInfoJSON(ni, IngestInfo.class)).ifPresent(nib::setIngest); + Optional.ofNullable(getNodeInfoJSON(ni, AggregationInfo.class)).ifPresent(nib::setAggs); + Optional.ofNullable(getNodeInfoJSON(ni, SearchPipelineInfo.class)).ifPresent(nib::setSearchPipelines); + + builder.addNodesInfo(nib); + } + + return builder.build(); + } + + private static String getNodeInfoJSON(NodeInfo n, Class clazz) { + try { + XContentBuilder settingsBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + return n.getInfo(OsInfo.class).toXContent(settingsBuilder, EMPTY_PARAMS).toString(); + } catch (IOException e) { + logger.debug("Failed to build NodesInfo os: " + e); + } + return null; + } +} diff --git a/server/src/main/java/org/opensearch/grpc/services/SearchServiceImpl.java b/server/src/main/java/org/opensearch/grpc/services/SearchServiceImpl.java new file mode 100644 index 0000000000000..d5ac3c6e6f0d6 --- /dev/null +++ b/server/src/main/java/org/opensearch/grpc/services/SearchServiceImpl.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.grpc.services; + +import io.grpc.stub.StreamObserver; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.client.node.NodeClient; + +import opensearch.proto.services.SearchServiceGrpc; +import opensearch.protos.ExplainRequest; +import opensearch.protos.ExplainResponse; +import opensearch.protos.IndexSearchRequest; +import opensearch.protos.IndexSearchResponse; +import opensearch.protos.SearchRequest; +import opensearch.protos.SearchResponse; + +public class SearchServiceImpl extends SearchServiceGrpc.SearchServiceImplBase { + private static final Logger logger = LogManager.getLogger(SearchServiceImpl.class); + private final NodeClient client; + + public SearchServiceImpl(NodeClient client) { + this.client = client; + } + + @Override + public void search(SearchRequest request, StreamObserver responseObserver) { + super.search(request, responseObserver); + } + + @Override + public void indexSearch(IndexSearchRequest request, StreamObserver responseObserver) { + super.indexSearch(request, responseObserver); + } + + @Override + public void explain(ExplainRequest request, StreamObserver responseObserver) { + super.explain(request, responseObserver); + } +} diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index 991fbf12072be..97bca2525c325 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -71,7 +71,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -79,6 +78,8 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import static 
org.opensearch.common.network.NetworkService.resolvePublishPort; + import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_BIND_HOST; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_PORT; @@ -192,7 +193,21 @@ protected void bindServer() { throw new BindTransportException("Failed to resolve publish address", e); } - final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress); + final int publishPort = resolvePublishPort(SETTING_HTTP_PUBLISH_PORT.get(settings), boundAddresses, publishInetAddress); + if (publishPort < 0) { + throw new BindHttpException( + "Failed to auto-resolve http publish port, multiple bound addresses " + + boundAddresses + + " with distinct ports and none of them matched the publish address (" + + publishInetAddress + + "). " + + "Please specify a unique port by setting " + + SETTING_HTTP_PORT.getKey() + + " or " + + SETTING_HTTP_PUBLISH_PORT.getKey() + ); + } + TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress); logger.info("{}", boundAddress); @@ -258,47 +273,6 @@ protected void doClose() {} */ protected abstract void stopInternal(); - // package private for tests - static int resolvePublishPort(Settings settings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings); - - if (publishPort < 0) { - for (TransportAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.address().getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses 
- if (publishPort < 0) { - final Set ports = new HashSet<>(); - for (TransportAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next(); - } - } - - if (publishPort < 0) { - throw new BindHttpException( - "Failed to auto-resolve http publish port, multiple bound addresses " - + boundAddresses - + " with distinct ports and none of them matched the publish address (" - + publishInetAddress - + "). " - + "Please specify a unique port by setting " - + SETTING_HTTP_PORT.getKey() - + " or " - + SETTING_HTTP_PUBLISH_PORT.getKey() - ); - } - return publishPort; - } - public void onException(HttpChannel channel, Exception e) { channel.handleException(e); if (lifecycle.started() == false) { diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index e74fca60b0201..7ed72328df9c9 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -145,6 +145,11 @@ import org.opensearch.gateway.ShardsBatchGatewayAllocator; import org.opensearch.gateway.remote.RemoteClusterStateCleanupManager; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.grpc.GrpcServerTransport; +import org.opensearch.grpc.services.DocumentServiceImpl; +import org.opensearch.grpc.services.GrpcServiceRegistry; +import org.opensearch.grpc.services.NodesInfoServiceImpl; +import org.opensearch.grpc.services.SearchServiceImpl; import org.opensearch.http.HttpServerTransport; import org.opensearch.identity.IdentityService; import org.opensearch.index.IndexModule; @@ -312,6 +317,7 @@ import static java.util.stream.Collectors.toList; import static org.opensearch.common.util.FeatureFlags.BACKGROUND_TASK_EXECUTION_EXPERIMENTAL; +import static org.opensearch.common.util.FeatureFlags.GRPC_ENABLE_SETTING; import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import 
static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; @@ -1131,6 +1137,13 @@ protected Node( admissionControlTransportInterceptor, workloadManagementTransportInterceptor ); + + GrpcServiceRegistry grpcReg = new GrpcServiceRegistry( + new SearchServiceImpl(client), + new DocumentServiceImpl(client), + new NodesInfoServiceImpl(client) + ); + final NetworkModule networkModule = new NetworkModule( settings, pluginsService.filterPlugins(NetworkPlugin.class), @@ -1140,6 +1153,7 @@ protected Node( circuitBreakerService, namedWriteableRegistry, xContentRegistry, + grpcReg, networkService, restController, clusterService.getClusterSettings(), @@ -1198,6 +1212,7 @@ protected Node( SearchExecutionStatsCollector.makeWrapper(responseCollectorService) ); final HttpServerTransport httpServerTransport = newHttpTransport(networkModule); + final GrpcServerTransport grpcServerTransport = newGrpcTransport(networkModule); final IndexingPressureService indexingPressureService = new IndexingPressureService(settings, clusterService); // Going forward, IndexingPressureService will have required constructs for exposing listeners/interfaces for plugin // development. Then we can deprecate Getter and Setter for IndexingPressureService in ClusterService (#478). 
@@ -1339,6 +1354,7 @@ protected Node( circuitBreakerService, scriptService, httpServerTransport, + grpcServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter(), @@ -1475,6 +1491,7 @@ protected Node( .toInstance(new SegmentReplicationSourceService(indicesService, transportService, recoverySettings)); } b.bind(HttpServerTransport.class).toInstance(httpServerTransport); + b.bind(GrpcServerTransport.class).toInstance(grpcServerTransport); pluginComponents.stream().forEach(p -> b.bind((Class) p.getClass()).toInstance(p)); b.bind(PersistentTasksService.class).toInstance(persistentTasksService); b.bind(PersistentTasksClusterService.class).toInstance(persistentTasksClusterService); @@ -1749,11 +1766,19 @@ public void onTimeout(TimeValue timeout) { injector.getInstance(HttpServerTransport.class).start(); + if (FeatureFlags.isEnabled(GRPC_ENABLE_SETTING)) { + injector.getInstance(GrpcServerTransport.class).start(); + } + if (WRITE_PORTS_FILE_SETTING.get(settings())) { TransportService transport = injector.getInstance(TransportService.class); writePortsFile("transport", transport.boundAddress()); HttpServerTransport http = injector.getInstance(HttpServerTransport.class); writePortsFile("http", http.boundAddress()); + if (FeatureFlags.isEnabled(GRPC_ENABLE_SETTING)) { + GrpcServerTransport grpc = injector.getInstance(GrpcServerTransport.class); + writePortsFile("grpc", grpc.boundAddress()); + } } logger.info("started"); @@ -2095,6 +2120,11 @@ protected HttpServerTransport newHttpTransport(NetworkModule networkModule) { return networkModule.getHttpServerTransportSupplier().get(); } + /** Constructs a {@link org.opensearch.grpc.GrpcServerTransport} which may be mocked for tests. 
*/ + protected GrpcServerTransport newGrpcTransport(NetworkModule networkModule) { + return networkModule.getGrpcServerTransportSupplier().get(); + } + private static class LocalNodeFactory implements Function { private final SetOnce localNode = new SetOnce<>(); private final String persistentNodeId; diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index 9671fda14375d..a893ada6eddbc 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -47,6 +47,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.discovery.Discovery; +import org.opensearch.grpc.GrpcServerTransport; import org.opensearch.http.HttpServerTransport; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.SegmentReplicationStatsTracker; @@ -87,6 +88,7 @@ public class NodeService implements Closeable { private final SettingsFilter settingsFilter; private final ScriptService scriptService; private final HttpServerTransport httpServerTransport; + private final GrpcServerTransport grpcServerTransport; private final ResponseCollectorService responseCollectorService; private final ResourceUsageCollectorService resourceUsageCollectorService; private final SearchTransportService searchTransportService; @@ -114,6 +116,7 @@ public class NodeService implements Closeable { CircuitBreakerService circuitBreakerService, ScriptService scriptService, @Nullable HttpServerTransport httpServerTransport, + @Nullable GrpcServerTransport grpcServerTransport, IngestService ingestService, ClusterService clusterService, SettingsFilter settingsFilter, @@ -140,6 +143,7 @@ public class NodeService implements Closeable { this.pluginService = pluginService; this.circuitBreakerService = circuitBreakerService; this.httpServerTransport = httpServerTransport; + 
this.grpcServerTransport = grpcServerTransport; this.ingestService = ingestService; this.settingsFilter = settingsFilter; this.scriptService = scriptService; @@ -169,6 +173,7 @@ public NodeInfo info( boolean threadPool, boolean transport, boolean http, + boolean grpc, boolean plugin, boolean ingest, boolean aggs, @@ -197,6 +202,9 @@ public NodeInfo info( if (http && httpServerTransport != null) { builder.setHttp(httpServerTransport.info()); } + if (grpc && grpcServerTransport != null) { + builder.setGrpc(grpcServerTransport.info()); + } if (plugin && pluginService != null) { builder.setPlugins(pluginService.info()); } @@ -224,6 +232,7 @@ public NodeStats stats( boolean fs, boolean transport, boolean http, + boolean grpc, boolean circuitBreaker, boolean script, boolean discoveryStats, @@ -258,6 +267,7 @@ public NodeStats stats( fs ? monitorService.fsService().stats() : null, transport ? transportService.stats() : null, http ? (httpServerTransport == null ? null : httpServerTransport.stats()) : null, + grpc ? (grpcServerTransport == null ? null : grpcServerTransport.stats()) : null, circuitBreaker ? circuitBreakerService.stats() : null, script ? scriptService.stats() : null, discoveryStats ? 
discovery.stats() : null, diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index 138ef6f71280d..4c7c05ef1803b 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -40,6 +40,8 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.grpc.GrpcServerTransport; +import org.opensearch.grpc.services.GrpcServiceRegistry; import org.opensearch.http.HttpServerTransport; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; @@ -108,6 +110,17 @@ default Map> getHttpTransports( return Collections.emptyMap(); } + /** + * Returns a map of {@link GrpcServerTransport} suppliers. + */ + default Map> getGrpcTransports( + Settings settings, + NetworkService networkService, + GrpcServiceRegistry grpcServiceRegistry + ) { + return Collections.emptyMap(); + } + /** * Returns a map of secure {@link Transport} suppliers. * See {@link org.opensearch.common.network.NetworkModule#TRANSPORT_TYPE_KEY} to configure a specific implementation. diff --git a/server/src/main/proto/org/opensearch/action/admin/cluster/node/info/NodesInfoProto.proto b/server/src/main/proto/org/opensearch/action/admin/cluster/node/info/NodesInfoProto.proto new file mode 100644 index 0000000000000..3f6ff74d04b88 --- /dev/null +++ b/server/src/main/proto/org/opensearch/action/admin/cluster/node/info/NodesInfoProto.proto @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +syntax = "proto3"; +package org.opensearch.action.admin.cluster.node.info.proto; + +option java_outer_classname = "NodesInfoProto"; + +message NodesInfoRequestProto { + Metrics metrics = 1; + string timeout = 2; + repeated string nodeIds = 3; +} + +message NodesInfoResponseProto { + string clusterName = 1; + repeated NodesInfo nodesInfo = 2; +} + +message StringList { + repeated string str_list = 1; +} + +message Metrics { + bool all = 1; + StringList metric_list = 2; +} + +message NodesInfo { + string node_id = 1; + string node_name = 2; + string transport_address = 3; + string host = 4; + string ip = 5; + string version = 6; + string build_type = 7; + string build_hash = 8; + repeated string roles = 9; + map attributes = 10; + optional string total_indexing_buffer = 11; + + // TODO: Writing these more complex objects to strings in JSON + optional string settings = 12; + optional string os = 13; + optional string process = 14; + optional string jvm = 15; + optional string thread_pool = 16; + optional string transport = 17; + optional string http = 18; + optional string grpc = 19; + optional string plugins = 20; + optional string ingest = 21; + optional string aggs = 22; + optional string search_pipelines = 24; +} diff --git a/server/src/main/proto/extensions/ExtensionIdentityProto.proto b/server/src/main/proto/org/opensearch/extensions/ExtensionIdentityProto.proto similarity index 100% rename from server/src/main/proto/extensions/ExtensionIdentityProto.proto rename to server/src/main/proto/org/opensearch/extensions/ExtensionIdentityProto.proto diff --git a/server/src/main/proto/extensions/ExtensionRequestProto.proto b/server/src/main/proto/org/opensearch/extensions/ExtensionRequestProto.proto similarity index 86% rename from server/src/main/proto/extensions/ExtensionRequestProto.proto rename to server/src/main/proto/org/opensearch/extensions/ExtensionRequestProto.proto index 9526e4145b281..2a176bb8d8a2b 100644 --- 
a/server/src/main/proto/extensions/ExtensionRequestProto.proto +++ b/server/src/main/proto/org/opensearch/extensions/ExtensionRequestProto.proto @@ -12,7 +12,8 @@ syntax = "proto3"; package org.opensearch.extensions.proto; -import "extensions/ExtensionIdentityProto.proto"; +import "org/opensearch/extensions/ExtensionIdentityProto.proto"; + option java_outer_classname = "ExtensionRequestProto"; enum RequestType { @@ -28,6 +29,6 @@ enum RequestType { } message ExtensionRequest { - ExtensionIdentity identity = 1; + org.opensearch.extensions.proto.ExtensionIdentity identity = 1; RequestType requestType = 2; } diff --git a/server/src/main/proto/extensions/HandleTransportRequestProto.proto b/server/src/main/proto/org/opensearch/extensions/HandleTransportRequestProto.proto similarity index 100% rename from server/src/main/proto/extensions/HandleTransportRequestProto.proto rename to server/src/main/proto/org/opensearch/extensions/HandleTransportRequestProto.proto diff --git a/server/src/main/proto/extensions/RegisterRestActionsProto.proto b/server/src/main/proto/org/opensearch/extensions/RegisterRestActionsProto.proto similarity index 80% rename from server/src/main/proto/extensions/RegisterRestActionsProto.proto rename to server/src/main/proto/org/opensearch/extensions/RegisterRestActionsProto.proto index fa5599b118a8b..5f91229ebdf85 100644 --- a/server/src/main/proto/extensions/RegisterRestActionsProto.proto +++ b/server/src/main/proto/org/opensearch/extensions/RegisterRestActionsProto.proto @@ -12,11 +12,11 @@ syntax = "proto3"; package org.opensearch.extensions.proto; -import "extensions/ExtensionIdentityProto.proto"; +import "org/opensearch/extensions/ExtensionIdentityProto.proto"; option java_outer_classname = "RegisterRestActionsProto"; message RegisterRestActions { - ExtensionIdentity identity = 1; + org.opensearch.extensions.proto.ExtensionIdentity identity = 1; repeated string restActions = 2; repeated string deprecatedRestActions = 3; } diff --git 
a/server/src/main/proto/extensions/RegisterTransportActionsProto.proto b/server/src/main/proto/org/opensearch/extensions/RegisterTransportActionsProto.proto similarity index 79% rename from server/src/main/proto/extensions/RegisterTransportActionsProto.proto rename to server/src/main/proto/org/opensearch/extensions/RegisterTransportActionsProto.proto index a6ce252cedf88..26c52a5f63f43 100644 --- a/server/src/main/proto/extensions/RegisterTransportActionsProto.proto +++ b/server/src/main/proto/org/opensearch/extensions/RegisterTransportActionsProto.proto @@ -12,10 +12,11 @@ syntax = "proto3"; package org.opensearch.extensions.proto; -import "extensions/ExtensionIdentityProto.proto"; +import "org/opensearch/extensions/ExtensionIdentityProto.proto"; + option java_outer_classname = "RegisterTransportActionsProto"; message RegisterTransportActions { - ExtensionIdentity identity = 1; + org.opensearch.extensions.proto.ExtensionIdentity identity = 1; repeated string transportActions = 2; } diff --git a/server/src/main/proto/services/Document.proto b/server/src/main/proto/services/Document.proto new file mode 100644 index 0000000000000..a64e96b4c4f35 --- /dev/null +++ b/server/src/main/proto/services/Document.proto @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +syntax = "proto3"; +package opensearch.proto.services; + +import "spec/document.proto"; + +option java_outer_classname = "Document"; + +service DocumentService { + /*Consolidate to IndexDocument*/ + rpc IndexDocument(IndexDocumentRequest) returns (IndexDocumentResponse) {} + rpc IndexDocumentId(IndexDocumentIdRequest) returns (IndexDocumentIdResponse) {} + rpc IndexDocumentCreateId(IndexDocumentCreateIdRequest) returns (IndexDocumentCreateIdResponse) {} + + /*Consolidate to Bulk*/ + rpc Bulk(BulkRequest) returns (BulkResponse) {} + rpc IndexBulk(IndexBulkRequest) returns (IndexBulkResponse) {} + + rpc DeleteDocument(DeleteDocumentRequest) returns (DeleteDocumentResponse) {} + rpc UpdateDocument(UpdateDocumentRequest) returns (UpdateDocumentResponse) {} + rpc GetDocument(GetDocumentRequest) returns (GetDocumentResponse) {} + rpc GetDocumentSource(GetDocumentSourceRequest) returns (GetDocumentSourceResponse) {} +} diff --git a/server/src/main/proto/services/NodeInfo.proto b/server/src/main/proto/services/NodeInfo.proto new file mode 100644 index 0000000000000..4184b30424da4 --- /dev/null +++ b/server/src/main/proto/services/NodeInfo.proto @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +syntax = "proto3"; +package opensearch.proto.services; + +import "org/opensearch/action/admin/cluster/node/info/NodesInfoProto.proto"; + +option java_outer_classname = "NodesInfoProtoService"; + +service NodesInfoService { + rpc NodesInfo(org.opensearch.action.admin.cluster.node.info.proto.NodesInfoRequestProto) returns (org.opensearch.action.admin.cluster.node.info.proto.NodesInfoResponseProto) {} + rpc StreamNodesInfo(org.opensearch.action.admin.cluster.node.info.proto.NodesInfoRequestProto) returns (stream org.opensearch.action.admin.cluster.node.info.proto.NodesInfoResponseProto) {} +} diff --git a/server/src/main/proto/services/Search.proto b/server/src/main/proto/services/Search.proto new file mode 100644 index 0000000000000..2695dec823f85 --- /dev/null +++ b/server/src/main/proto/services/Search.proto @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +syntax = "proto3"; +package opensearch.proto.services; + +import "spec/search.proto"; + +option java_outer_classname = "Search"; + +service SearchService { + /*Consolidate to Search*/ + rpc Search(SearchRequest) returns (SearchResponse) {} + rpc IndexSearch(IndexSearchRequest) returns (IndexSearchResponse) {} + + rpc Explain(ExplainRequest) returns (ExplainResponse) {} +} diff --git a/server/src/main/proto/spec/common.proto b/server/src/main/proto/spec/common.proto new file mode 100644 index 0000000000000..6eb1f9e7fb0d3 --- /dev/null +++ b/server/src/main/proto/spec/common.proto @@ -0,0 +1,2710 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "opensearch.protos"; +option java_outer_classname = "CommonProto"; +option go_package = "opensearchpb"; + +import "google/protobuf/wrappers.proto"; +import "google/protobuf/struct.proto"; + + +message WaitForActiveShards { + + enum WaitForActiveShardOptions { + + WAIT_FOR_ACTIVE_SHARD_OPTIONS_INVALID = 0; + WAIT_FOR_ACTIVE_SHARD_OPTIONS_ALL = 1; + WAIT_FOR_ACTIVE_SHARD_OPTIONS_INDEX_SETTING = 2; + } + + oneof wait_for_active_shards { + .google.protobuf.Int32Value int32_value = 1; + WaitForActiveShardOptions wait_for_active_shard_options = 2; + } + +} + +message Script { + oneof script { + // Defines an inline script to execute as part of a query. + InlineScript inline_script = 1; + // References a stored script by its ID for use in a query. + StoredScriptId stored_script_id = 2; + } +} + +message InlineScript { + // [optional] + // The parameters that can be passed to the script. + ObjectMap params = 1; + + // [optional] + // The script's language. Default is painless. + ScriptLanguage lang = 2; + + map options = 3; + + // [required] + // The script source. 
+ .google.protobuf.StringValue source = 4; +} + +message ScriptLanguage { + enum BuiltinScriptLanguage { + + BUILTIN_SCRIPT_LANGUAGE_INVALID = 0; + BUILTIN_SCRIPT_LANGUAGE_EXPRESSION = 1; + BUILTIN_SCRIPT_LANGUAGE_JAVA = 2; + BUILTIN_SCRIPT_LANGUAGE_MUSTACHE = 3; + BUILTIN_SCRIPT_LANGUAGE_PAINLESS = 4; + } + BuiltinScriptLanguage builtin_script_language = 1; + .google.protobuf.StringValue string_value = 2; +} + +message StoredScriptId { + // [optional] + // The parameters that can be passed to the script. + ObjectMap params = 1; + // [required] + // The ID of a stored script previously created using the Create Stored Script API. + .google.protobuf.StringValue id = 2; +} + +message ObjectMap { + map fields = 1; + + message Value { + // The kind of value. + oneof value { + // Represents a null value. + NullValue null_value = 1; + // Represents a .google.protobuf.DoubleValue value. + GeneralNumber general_number = 2; + // Represents a .google.protobuf.StringValue value. + .google.protobuf.StringValue string_value = 3; + // Represents a boolean value. + .google.protobuf.BoolValue bool_value = 4; + // Represents a structured value. + ObjectMap object_map = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } + + } + + // `ListValue` is a wrapper around a repeated field of values. + // The JSON representation for `ListValue` is JSON array. + message ListValue { + // Repeated field of dynamically typed values. 
+ repeated ValueWithoutWrappers value_without_wrappers = 1; + } + + message ValueWithoutWrappers { + oneof value_without_wrappers { + NullValue null_value = 1; + int32 int32 = 2; + int64 int64 = 3; + float float = 4; + double double = 5; + string string = 6; + bool bool = 7; + ObjectMap object_map = 8; + ListValue list_value = 9; + } + } +} + +enum NullValue { + + NULL_VALUE_INVALID = 0; + NULL_VALUE_NULL = 1; +} + +message GeoLocation { + + oneof geo_location { + LatLonGeoLocation lat_lon_geo_location = 1; + GeoHashLocation geo_hash_location = 2; + NumberArray number_array = 3; + } + +} + +message NumberArray { + + repeated double number_array = 1; + +} + +message LatLonGeoLocation { + // Latitude + .google.protobuf.DoubleValue lat = 1; + + // Longitude + .google.protobuf.DoubleValue lon = 2; + +} + +message GeoHashLocation { + + .google.protobuf.StringValue geohash = 1; + +} + +message GeneralNumber { + oneof value{ + .google.protobuf.Int32Value int32_value = 1; + .google.protobuf.Int64Value int64_value = 2; + .google.protobuf.FloatValue float_value = 3; + .google.protobuf.DoubleValue double_value = 4; + } +} + +message SourceConfigParam { + + oneof source_config_param { + // `true` or `false` to return the `_source` field or not + .google.protobuf.BoolValue bool_value = 1; + // list of fields to be retrieved from `_source` + StringArray string_array = 2; + } + +} + +message StringArray{ + repeated string string_array = 1; +} + +message StringOrStringArray { + oneof string_or_string_array{ + .google.protobuf.StringValue string_value = 1; + StringArray string_array = 2; + } +} + +message SourceConfig { + + oneof source_config{ + // [optional] if the source_config is bool value. true: The entire document source is returned. false: The document source is not returned. + .google.protobuf.BoolValue bool_value = 1; + // [optional] Array of patterns containing source fields to return. 
+ StringArray string_array = 2; + // [optional] source_filter type containing a list of source fields to include or exclude. + SourceFilter source_filter = 3; + } + +} + +message RuntimeField { + + // For type `lookup` + repeated RuntimeFieldFetchFields fetch_fields = 1; + + // A custom format for `date` type runtime fields. + .google.protobuf.StringValue format = 2; + + // Path to field or array of paths. Some API's support wildcards in the path to select multiple fields. + .google.protobuf.StringValue input_field = 3; + + // Path to field or array of paths. Some API's support wildcards in the path to select multiple fields. + .google.protobuf.StringValue target_field = 4; + + .google.protobuf.StringValue target_index = 5; + + Script script = 6; + + enum RuntimeFieldType { + + RUNTIME_FIELD_TYPE_INVALID = 0; + RUNTIME_FIELD_TYPE_BOOLEAN = 1; + RUNTIME_FIELD_TYPE_DATE = 2; + RUNTIME_FIELD_TYPE_DOUBLE = 3; + RUNTIME_FIELD_TYPE_GEO_POINT = 4; + RUNTIME_FIELD_TYPE_IP = 5; + RUNTIME_FIELD_TYPE_KEYWORD = 6; + RUNTIME_FIELD_TYPE_LONG = 7; + RUNTIME_FIELD_TYPE_LOOKUP = 8; + } + + RuntimeFieldType type = 7; + +} + +message RuntimeFieldFetchFields { + + // Path to field or array of paths. Some API's support wildcards in the path to select multiple fields. + .google.protobuf.StringValue field = 1; + + .google.protobuf.StringValue format = 2; + +} + +message SourceFilter { + // [optional] Wildcard (*) patterns are supported as array elements to specify source fields to exclude from the response. + repeated string excludes = 1; + // [optional] Wildcard (*) patterns are supported as array elements to specify source fields to return. + repeated string includes = 2; +} + +message ErrorCause { + + // The type of error + .google.protobuf.StringValue type = 1; + + // A human-readable explanation of the error, in english + .google.protobuf.StringValue reason = 2; + + // The server stack trace. Present only if the `error_trace=true` parameter was sent with the request. 
+ .google.protobuf.StringValue stack_trace = 3; + + ErrorCause caused_by = 4; + + repeated ErrorCause root_cause = 5; + + repeated ErrorCause suppressed = 6; + + .google.protobuf.StringValue index = 7; + + .google.protobuf.StringValue shard = 8; + + .google.protobuf.StringValue index_uuid = 9; + + .google.protobuf.Struct additional_details = 10; + +} + +message ShardStatistics { + // [required] Number of shards that failed to execute the request. Note that shards that are not allocated will be considered neither successful nor failed. Having failed+successful less than total is thus an indication that some of the shards were not allocated. + .google.protobuf.Int32Value failed = 1; + + // [required] Number of shards that executed the request successfully. + .google.protobuf.Int32Value successful = 2; + + // [required] Total number of shards that require querying, including unallocated shards. + .google.protobuf.Int32Value total = 3; + + // [optional] An array of any shard-specific failures that occurred during the search operation. + repeated ShardFailure failures = 4; + + // [optional] Number of shards that skipped the request because a lightweight check helped realize that no documents could possibly match on this shard. This typically happens when a search request includes a range filter and the shard only has values that fall outside of that range. + .google.protobuf.Int32Value skipped = 5; + +} + +message ShardFailure { + + // [optional] Name of the index in which the shard failure occurred. + .google.protobuf.StringValue index = 1; + + // [optional] ID of the node where the shard is located. + .google.protobuf.StringValue node = 2; + + // [required] Provides details about the error that caused the shard failure. + ErrorCause reason = 3; + + // [required] The shard number where the failure occurred. + .google.protobuf.Int32Value shard = 4; + + // [optional] Error status. 
+ .google.protobuf.StringValue status = 5; + +} + +message QueryContainer { + + BoolQuery bool = 1; + + BoostingQuery boosting = 2; + + ConstantScoreQuery constant_score = 3; + + DisMaxQuery dis_max = 4; + + FunctionScoreQuery function_score = 5; + + // [optional] + // Use the exists query to search for documents that contain a specific field. + ExistsQuery exists = 6; + + // [optional] + // Fuzzy query is to searches for documents containing terms that are similar to the search term within the maximum allowed Damerau–Levenshtein distance. The Damerau–Levenshtein distance measures the number of one-character changes needed to change one term to another term. + // only 1 entry can be provided in the map + map fuzzy = 7; + + IdsQuery ids = 8; + + // [optional] + // Prefix query is to search for terms that begin with a specific prefix. + // only 1 entry can be provided in the map + map prefix = 9; + + // Returns documents that contain terms within a provided range. + // only 1 entry can be provided in the map + map range = 10; + + // Returns documents that contain terms matching a regular expression. + // only 1 entry can be provided in the map + map regexp = 11; + + // [optional] + // Term query is to search for an exact term in a field. The term query does not analyze the search term. The term query only searches for the exact term you provide. + // only 1 entry can be provided in the map + map term = 12; + + // [optional] + // Terms query field is to search for documents containing one or more terms in a specific field. Use the terms query to search for multiple terms in the same field. + TermsQueryField terms = 13; + + // [optional] + // terms set query is to search for documents that match a minimum number of exact terms in a specified field. A terms_set query is similar to a terms query, except that you can specify the minimum number of matching terms that are required in order to return a document. 
You can specify this number either in a field in the index or with a script. + // only 1 entry can be provided in the map + map terms_set = 14; + + // Returns documents that contain terms matching a wildcard pattern. + // only 1 entry can be provided in the map + map wildcard = 15; + + // [optional] + // Use the match query for full-text search on a specific document field. If you run a match query on a text field, the match query analyzes the provided search string and returns documents that match any of the string's terms. If you run a match query on an exact-value field, it returns documents that match the exact value. The preferred way to search exact-value fields is to use a filter because, unlike a query, a filter is cached. + // only 1 entry can be provided in the map + map match = 16; + + // [optional] + // The match_bool_prefix query analyzes the provided search string and creates a Boolean query from the string's terms. It uses every term except the last term as a whole word for matching. The last term is used as a prefix. The match_bool_prefix query returns documents that contain either the whole-word terms or terms that start with the prefix term, in any order. + // only 1 entry can be provided in the map + map match_bool_prefix = 17; + + // [optional] + // Use the match_phrase query to match documents that contain an exact phrase in a specified order. You can add flexibility to phrase matching by providing the slop parameter. + // only 1 entry can be provided in the map + map match_phrase = 18; + + // [optional] + // Use the match_phrase_prefix query to specify a phrase to match in order. The documents that contain the phrase you specify will be returned. The last partial term in the phrase is interpreted as a prefix, so any documents that contain phrases that begin with the phrase and prefix of the last term will be returned. 
+ // only 1 entry can be provided in the map + map match_phrase_prefix = 19; + + MultiMatchQuery multi_match = 20; + + QueryStringQuery query_string = 21; + + SimpleQueryStringQuery simple_query_string = 22; + + // Returns documents based on the order and proximity of matching terms. + // only 1 entry can be provided in the map + map intervals = 23; + + // [optional] + // Knn query is to search for the k-nearest neighbors to a query point across an index of vectors. To determine the neighbors, you can specify the space (the distance function) you want to use to measure the distance between points. + // only 1 entry can be provided in the map + map knn = 24; + + // [optional] + // The match all query returns all documents. This query can be useful in testing large document sets if you need to return the entire set. + MatchAllQuery match_all = 25; + + MatchNoneQuery match_none = 26; + + ScriptScoreQuery script_score = 27; + + NestedQuery nested = 28; + +} + +message NestedQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // Set to `true` to ignore an unmapped field and not match any documents for this query. Set to `false` to throw an exception if the field is not mapped. + .google.protobuf.BoolValue ignore_unmapped = 3; + + InnerHits inner_hits = 4; + + // The path to a field or an array of paths. Some APIs support wildcards in the path, which allows you to select multiple fields. 
+ .google.protobuf.StringValue path = 5; + + QueryContainer query = 6; + + enum ChildScoreMode { + + CHILD_SCORE_MODE_INVALID = 0; + CHILD_SCORE_MODE_AVG = 1; + CHILD_SCORE_MODE_MAX = 2; + CHILD_SCORE_MODE_MIN = 3; + CHILD_SCORE_MODE_NONE = 4; + CHILD_SCORE_MODE_SUM = 5; + } + ChildScoreMode score_mode = 7; + +} + +message InnerHits { + + // [optional] The name to be used for the particular inner hit definition in the response. Useful when multiple inner hits have been defined in a single search request. + .google.protobuf.StringValue name = 1; + + // [optional] The maximum number of hits to return per `inner_hits`. + .google.protobuf.Int32Value size = 2; + + // [optional] Inner hit starting document offset. + .google.protobuf.Int32Value from = 3; + + // [optional] The collapse parameter groups search results by a particular field value. This returns only the top document within each group, which helps reduce redundancy by eliminating duplicates. + FieldCollapse collapse = 4; + + // [optional] The fields that OpenSearch should return using their docvalue forms. Specify a format to return results in a certain format, such as date and time. + repeated FieldAndFormat docvalue_fields = 5; + + // [optional] Whether to return details about how OpenSearch computed the document's score. Default is false. + .google.protobuf.BoolValue explain = 6; + + // [optional] Highlighting emphasizes the search term(s) in the results so you can emphasize the query matches. + Highlight highlight = 7; + + // [optional] Specifies how to treat an unmapped field. Set ignore_unmapped to true to ignore unmapped fields. Default is false + .google.protobuf.BoolValue ignore_unmapped = 8; + + // [optional] The script_fields parameter allows you to include custom fields whose values are computed using scripts in your search results. This can be useful for calculating values dynamically based on the document data. You can also retrieve derived fields by using a similar approach. 
+ map<string, ScriptField> script_fields = 9; + + // [optional] Whether to return sequence number and primary term of the last operation of each document hit. + .google.protobuf.BoolValue seq_no_primary_term = 10; + + // [optional] Retrieve selected fields from a search + repeated string fields = 11; + + // [optional] How the inner hits should be sorted per inner_hits. By default the hits are sorted by the score. + repeated SortCombinations sort = 12; + + // [optional] Select what fields of the source are returned + SourceConfig source = 13 [json_name = "_source"]; + + // [optional] A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this option is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. + repeated string stored_fields = 14; + + // [optional] Whether to return document scores. Default is false. + .google.protobuf.BoolValue track_scores = 15; + + // [optional] Whether to include the document version as a match. + .google.protobuf.BoolValue version = 16; + +} + +message ScriptField { + Script script = 1; + .google.protobuf.BoolValue ignore_failure = 2; +} + +message Highlight { + + enum HighlighterType { + + HIGHLIGHTER_TYPE_INVALID = 0; + // The fvh highlighter uses the Lucene Fast Vector highlighter. This highlighter can be used on fields with term_vector set to with_positions_offsets in the mapping. + HIGHLIGHTER_TYPE_FVH = 1; + // The plain highlighter uses the standard Lucene highlighter. It attempts to reflect the query matching logic in terms of understanding word importance and any word positioning criteria in phrase queries. + HIGHLIGHTER_TYPE_PLAIN = 2; + // The unified highlighter uses the Lucene Unified Highlighter. This highlighter breaks the text into sentences and uses the BM25 algorithm to score individual sentences as if they were documents in the corpus. 
It also supports accurate phrase and multi-term (fuzzy, prefix, regex) highlighting. The unified highlighter can combine matches from multiple fields into one result (see matched_fields). This is the default highlighter. + HIGHLIGHTER_TYPE_UNIFIED = 3; + } + + // [optional] Specifies the highlighter to use. Default is unified. + HighlighterType type = 1; + + // [optional] All boundary characters combined in a string. Default is ".,!? \t\n". + .google.protobuf.StringValue boundary_chars = 2; + + // [optional] Controls how far to scan for boundary characters when the boundary_scanner parameter for the fvh highlighter is set to chars. Default is 20. + .google.protobuf.Int32Value boundary_max_scan = 3; + + enum BoundaryScanner { + + BOUNDARY_SCANNER_INVALID = 0; + // Split highlighted fragments at any character listed in boundary_chars. Valid only for the fvh highlighter. + BOUNDARY_SCANNER_CHARS = 1; + // Split highlighted fragments at sentence boundaries, as defined by the BreakIterator. You can specify the BreakIterator's locale in the boundary_scanner_locale option. + BOUNDARY_SCANNER_SENTENCE = 2; + // Split highlighted fragments at word boundaries, as defined by the BreakIterator. You can specify the BreakIterator's locale in the boundary_scanner_locale option. + BOUNDARY_SCANNER_WORD = 3; + } + + // [optional] Specifies whether to split the highlighted fragments into sentences, words, or characters. + BoundaryScanner boundary_scanner = 4; + + // [optional] Provides a locale for the boundary_scanner. Valid values are language tags (for example, "en-US"). Default is Locale.ROOT. + .google.protobuf.StringValue boundary_scanner_locale = 5; + + // [x-deprecated] + .google.protobuf.BoolValue force_source = 6; + + enum HighlighterFragmenter { + + HIGHLIGHTER_FRAGMENTER_INVALID = 0; + // Splits text into fragments of the same size. + HIGHLIGHTER_FRAGMENTER_SIMPLE = 1; + // Splits text into fragments of the same size but tries not to split text between highlighted terms. 
+ HIGHLIGHTER_FRAGMENTER_SPAN = 2; + } + + // [optional] Specifies how to split text into highlighted fragments. Valid only for the plain highlighter. Default HIGHLIGHTER_FRAGMENTER_SPAN. + HighlighterFragmenter fragmenter = 7; + + // [optional] The size of a highlighted fragment, specified as the number of characters. If number_of_fragments is set to 0, fragment_size is ignored. Default is 100. + .google.protobuf.Int32Value fragment_size = 8; + + .google.protobuf.BoolValue highlight_filter = 9; + + // [optional] Specifies that matches for a query other than the search query should be highlighted. The highlight_query option is useful when you use a faster query to get document matches and a slower query (for example, rescore_query) to refine the results. We recommend to include the search query as part of the highlight_query. + QueryContainer highlight_query = 10; + + .google.protobuf.Int32Value max_fragment_length = 11; + + // [optional] If set to a non-negative value, highlighting stops at this defined maximum limit. The rest of the text is not processed, thus not highlighted and no error is returned The `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it's set to lower value than the query setting. + .google.protobuf.Int32Value max_analyzed_offset = 12; + + // [optional] Specifies the number of characters, starting from the beginning of the field, to return if there are no matching fragments to highlight. Default is 0. + .google.protobuf.Int32Value no_match_size = 13; + + // [optional] The maximum number of returned fragments. If number_of_fragments is set to 0, OpenSearch returns the highlighted contents of the entire field. Default is 5. + .google.protobuf.Int32Value number_of_fragments = 14; + + ObjectMap options = 15; + + enum HighlighterOrder { + + HIGHLIGHTER_ORDER_INVALID = 0; + // Sort fragments by relevance. 
+ HIGHLIGHTER_ORDER_SCORE = 1; + } + + // [optional] The sort order for the highlighted fragments. Each highlighter has a different algorithm for calculating relevance scores. Default is none. + HighlighterOrder order = 16; + + // [optional] The number of matching phrases in a document that are considered. Limits the number of phrases to analyze by the fvh highlighter to avoid consuming a lot of memory. If matched_fields are used, phrase_limit specifies the number of phrases for each matched field. A higher phrase_limit leads to increased query time and more memory consumption. Valid only for the fvh highlighter. Default is 256. + .google.protobuf.Int32Value phrase_limit = 17; + + // [optional] Specifies the HTML end tags for the highlighted text as an array of strings. + repeated string post_tags = 18; + + // [optional] Specifies the HTML start tags for the highlighted text as an array of strings. + repeated string pre_tags = 19; + + // [optional] Specifies whether to highlight only fields that contain a search query match. Default is true. To highlight all fields, set this option to false. + .google.protobuf.BoolValue require_field_match = 20; + + enum HighlighterTagsSchema { + + HIGHLIGHTER_TAGS_SCHEMA_INVALID = 0; + // Defines the pre_tags as `<em class="hlt1">` through `<em class="hlt10">` and defines post_tags as `</em>`. + HIGHLIGHTER_TAGS_SCHEMA_STYLED = 1; + } + + // [optional] If you set this option to styled, OpenSearch uses the built-in tag schema. In this schema, the pre_tags are `<em class="hlt1">`, `<em class="hlt2">`, `<em class="hlt3">`, `<em class="hlt4">`, `<em class="hlt5">`, `<em class="hlt6">`, `<em class="hlt7">`, `<em class="hlt8">`, `<em class="hlt9">`, and `<em class="hlt10">`, and the post_tags is `</em>`. + HighlighterTagsSchema tags_schema = 21; + + enum HighlighterEncoder { + + HIGHLIGHTER_ENCODER_INVALID = 0; + // No encoding + HIGHLIGHTER_ENCODER_DEFAULT = 1; + // First escape the HTML text and then insert the highlighting tags + HIGHLIGHTER_ENCODER_HTML = 2; + } + + // [optional] Specifies whether the highlighted fragment should be HTML encoded before it is returned. 
+ HighlighterEncoder encoder = 22; + + // [required] Specifies the fields to search for text to be highlighted. Supports wildcard expressions. If you use wildcards, only text and keyword fields are highlighted. For example, you can set fields to my_field* to include all text and keyword fields that start with the prefix my_field. + map<string, HighlightField> fields = 23; + +} + +message HighlightField { + + enum HighlighterType { + + HIGHLIGHTER_TYPE_INVALID = 0; + // The fvh highlighter uses the Lucene Fast Vector highlighter. This highlighter can be used on fields with term_vector set to with_positions_offsets in the mapping. + HIGHLIGHTER_TYPE_FVH = 1; + // The plain highlighter uses the standard Lucene highlighter. It attempts to reflect the query matching logic in terms of understanding word importance and any word positioning criteria in phrase queries. + HIGHLIGHTER_TYPE_PLAIN = 2; + // The unified highlighter uses the Lucene Unified Highlighter. This highlighter breaks the text into sentences and uses the BM25 algorithm to score individual sentences as if they were documents in the corpus. It also supports accurate phrase and multi-term (fuzzy, prefix, regex) highlighting. The unified highlighter can combine matches from multiple fields into one result (see matched_fields). This is the default highlighter. + HIGHLIGHTER_TYPE_UNIFIED = 3; + } + + // [optional] Specifies the highlighter to use. Default is unified. + HighlighterType type = 1; + + // [optional] All boundary characters combined in a string. Default is ".,!? \t\n". + .google.protobuf.StringValue boundary_chars = 2; + + // [optional] Controls how far to scan for boundary characters when the boundary_scanner parameter for the fvh highlighter is set to chars. Default is 20. + .google.protobuf.Int32Value boundary_max_scan = 3; + + enum BoundaryScanner { + + BOUNDARY_SCANNER_INVALID = 0; + // Split highlighted fragments at any character listed in boundary_chars. Valid only for the fvh highlighter. 
+ BOUNDARY_SCANNER_CHARS = 1; + // Split highlighted fragments at sentence boundaries, as defined by the BreakIterator. You can specify the BreakIterator's locale in the boundary_scanner_locale option. + BOUNDARY_SCANNER_SENTENCE = 2; + // Split highlighted fragments at word boundaries, as defined by the BreakIterator. You can specify the BreakIterator's locale in the boundary_scanner_locale option. + BOUNDARY_SCANNER_WORD = 3; + } + + // [optional] Specifies whether to split the highlighted fragments into sentences, words, or characters. + BoundaryScanner boundary_scanner = 4; + + // [optional] Provides a locale for the boundary_scanner. Valid values are language tags (for example, "en-US"). Default is Locale.ROOT. + .google.protobuf.StringValue boundary_scanner_locale = 5; + + // [x-deprecated] + .google.protobuf.BoolValue force_source = 6; + + enum HighlighterFragmenter { + + HIGHLIGHTER_FRAGMENTER_INVALID = 0; + // Splits text into fragments of the same size. + HIGHLIGHTER_FRAGMENTER_SIMPLE = 1; + // Splits text into fragments of the same size but tries not to split text between highlighted terms. + HIGHLIGHTER_FRAGMENTER_SPAN = 2; + } + + // [optional] Specifies how to split text into highlighted fragments. Valid only for the plain highlighter. Default HIGHLIGHTER_FRAGMENTER_SPAN. + HighlighterFragmenter fragmenter = 7; + + // [optional] The size of a highlighted fragment, specified as the number of characters. If number_of_fragments is set to 0, fragment_size is ignored. Default is 100. + .google.protobuf.Int32Value fragment_size = 8; + + .google.protobuf.BoolValue highlight_filter = 9; + + // [optional] Specifies that matches for a query other than the search query should be highlighted. The highlight_query option is useful when you use a faster query to get document matches and a slower query (for example, rescore_query) to refine the results. We recommend to include the search query as part of the highlight_query. 
+ QueryContainer highlight_query = 10; + + .google.protobuf.Int32Value max_fragment_length = 11; + + // [optional] If set to a non-negative value, highlighting stops at this defined maximum limit. The rest of the text is not processed, thus not highlighted and no error is returned The `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it's set to lower value than the query setting. + .google.protobuf.Int32Value max_analyzed_offset = 12; + + // [optional] Specifies the number of characters, starting from the beginning of the field, to return if there are no matching fragments to highlight. Default is 0. + .google.protobuf.Int32Value no_match_size = 13; + + // [optional] The maximum number of returned fragments. If number_of_fragments is set to 0, OpenSearch returns the highlighted contents of the entire field. Default is 5. + .google.protobuf.Int32Value number_of_fragments = 14; + + ObjectMap options = 15; + + enum HighlighterOrder { + + HIGHLIGHTER_ORDER_INVALID = 0; + // Sort fragments by relevance. + HIGHLIGHTER_ORDER_SCORE = 1; + } + + // [optional] The sort order for the highlighted fragments. Each highlighter has a different algorithm for calculating relevance scores. Default is none. + HighlighterOrder order = 16; + + // [optional] The number of matching phrases in a document that are considered. Limits the number of phrases to analyze by the fvh highlighter to avoid consuming a lot of memory. If matched_fields are used, phrase_limit specifies the number of phrases for each matched field. A higher phrase_limit leads to increased query time and more memory consumption. Valid only for the fvh highlighter. Default is 256. + .google.protobuf.Int32Value phrase_limit = 17; + + // [optional] Specifies the HTML end tags for the highlighted text as an array of strings. 
+ repeated string post_tags = 18; + + // [optional] Specifies the HTML start tags for the highlighted text as an array of strings. + repeated string pre_tags = 19; + + // [optional] Specifies whether to highlight only fields that contain a search query match. Default is true. To highlight all fields, set this option to false. + .google.protobuf.BoolValue require_field_match = 20; + + enum HighlighterTagsSchema { + + HIGHLIGHTER_TAGS_SCHEMA_INVALID = 0; + // Defines the pre_tags as `<em class="hlt1">` through `<em class="hlt10">` and defines post_tags as `</em>`. + HIGHLIGHTER_TAGS_SCHEMA_STYLED = 1; + } + + // [optional] If you set this option to styled, OpenSearch uses the built-in tag schema. + HighlighterTagsSchema tags_schema = 21; + + // [optional] NOTE(review): the original comment here duplicated the tags_schema description; fragment_offset controls the character offset from which to start highlighting, and appears to apply to the fvh highlighter only — confirm against the OpenSearch highlighting docs. + .google.protobuf.Int32Value fragment_offset = 22; + + // [optional] Combines matches from different fields to highlight one field. The most common use case for this functionality is highlighting text that is analyzed in different ways and kept in multi-fields. All fields in the matched_fields list must have the term_vector field set to with_positions_offsets. The field in which the matches are combined is the only loaded field, so it is beneficial to set its store option to yes. Valid only for the fvh highlighter. + repeated string matched_fields = 23; + + Analyzer analyzer = 24; + +} + +message SortCombinations { + oneof sort_combinations{ + // [optional] Sort based on field name. + .google.protobuf.StringValue string_value = 1; + // [optional] Sort based on a map of fields with specified order directions. + FieldWithOrderMap field_with_order_map = 2; + // [optional] Sort using a combination of advanced sort options, such as score, document ID, geo-distance, or a custom script. + SortOptions sort_options = 3; + } +} + +message FieldWithOrderMap { + // [required] Map of fields and their corresponding sort order. 
+ map field_with_order_map = 1; +} + +message SortOptions { + oneof sort_options{ + // [optional] Sort by score. + ScoreSort score = 1 [json_name = "_score"]; + // [optional] Sort by index order. + ScoreSort doc = 2 [json_name = "_doc"]; + // [optional] Sort by _geo_distance + GeoDistanceSort geo_distance = 3 [json_name = "_geo_distance"]; + // [optional] Sort based on custom scripts + ScriptSort script = 4 [json_name = "_script"]; + } +} + +message ScoreSort { + // Specifies the sort order (asc or dsc) for the score. + SortOrder order = 1; + enum SortOrder { + + SORT_ORDER_INVALID = 0; + // Sort in ascending order. + SORT_ORDER_ASC = 1; + // Sort in descending order + SORT_ORDER_DESC = 2; + } +} + +message GeoDistanceSort { + + // Specifies how to handle a field with several geopoints. + SortMode mode = 1; + enum SortMode { + + SORT_MODE_INVALID = 0; + // Use the average of all values as sort value. Only applicable for number based array fields. + SORT_MODE_AVG = 1; + // Pick the highest value. + SORT_MODE_MAX = 2; + // Use the median of all values as sort value. Only applicable for number based array fields. + SORT_MODE_MEDIAN = 3; + // Pick the lowest value. + SORT_MODE_MIN = 4; + // Use the sum of all values as sort value. Only applicable for number based array fields. + SORT_MODE_SUM = 5; + } + + // [optional] Specifies the method of computing the distance. + GeoDistanceType distance_type = 2; + + enum GeoDistanceType { + + GEO_DISTANCE_TYPE_INVALID = 0; + // Default + GEO_DISTANCE_TYPE_ARC = 1; + // Faster but less accurate for long distances or close to the poles. + GEO_DISTANCE_TYPE_PLANE = 2; + } + + // [optional] Specifies how to treat an unmapped field. Set ignore_unmapped to true to ignore unmapped fields. Default is false + .google.protobuf.BoolValue ignore_unmapped = 3; + + // [optional] Specifies the sort order (asc or dsc) for the score. + SortOrder order = 4; + + enum SortOrder { + + SORT_ORDER_INVALID = 0; + // Sort in ascending order. 
+ SORT_ORDER_ASC = 1; + // Sort in descending order + SORT_ORDER_DESC = 2; + } + + // [optional] Specifies the units used to compute sort values. Default is meters (m). + DistanceUnit unit = 5; + + enum DistanceUnit { + + DISTANCE_UNIT_INVALID = 0; + DISTANCE_UNIT_CM = 1; + DISTANCE_UNIT_FT = 2; + DISTANCE_UNIT_IN = 3; + DISTANCE_UNIT_KM = 4; + DISTANCE_UNIT_M = 5; + DISTANCE_UNIT_MI = 6; + DISTANCE_UNIT_MM = 7; + DISTANCE_UNIT_NMI = 8; + DISTANCE_UNIT_YD = 9; + } +} + +message ScriptSort { + + // [optional] Specifies the sort order (asc or dsc) for the score. + SortOrder order = 1; + enum SortOrder { + + SORT_ORDER_INVALID = 0; + // Sort in ascending order. + SORT_ORDER_ASC = 1; + // Sort in descending order + SORT_ORDER_DESC = 2; + } + + // [optional] The script to execute for custom sorting. + Script script = 2; + + // [optional] Specifies script sort type. + ScriptSortType type = 3; + enum ScriptSortType { + + SCRIPT_SORT_TYPE_INVALID = 0; + SCRIPT_SORT_TYPE_NUMBER = 1; + SCRIPT_SORT_TYPE_STRING = 2; + SCRIPT_SORT_TYPE_VERSION = 3; + } + + // [optional] Specifies what array value should be chosen for sorting the document. + SortMode mode = 4; + enum SortMode { + + SORT_MODE_INVALID = 0; + // Use the average of all values as sort value. Only applicable for number based array fields. + SORT_MODE_AVG = 1; + // Pick the highest value. + SORT_MODE_MAX = 2; + // Use the median of all values as sort value. Only applicable for number based array fields. + SORT_MODE_MEDIAN = 3; + // Pick the lowest value. + SORT_MODE_MIN = 4; + // Use the sum of all values as sort value. Only applicable for number based array fields. + SORT_MODE_SUM = 5; + } + + // Supports sorting by fields that are inside one or more nested objects. + NestedSortValue nested = 5; + +} + +message NestedSortValue { + + // [optional] A filter that the inner objects inside the nested path should match with in order for its field values to be taken into account by sorting. 
Common case is to repeat the query / filter inside the nested filter or query. By default no filter is active. + QueryContainer filter = 1; + + // [optional] The maximum number of children to consider per root document when picking the sort value. Defaults to unlimited. + .google.protobuf.Int32Value max_children = 2; + + // [optional] Same as top-level nested but applies to another nested path within the current nested object. + NestedSortValue nested = 3; + + // [required] Specifies the path to the field on which to sort. + .google.protobuf.StringValue path = 4; + +} + +message FieldAndFormat { + + // [required] Wildcard pattern. The request returns doc values for field names matching this pattern. + .google.protobuf.StringValue field = 1; + + // [optional] Format in which the values are returned. + .google.protobuf.StringValue format = 2; + + // [optional] Retrieve unmapped fields in an object from _source + .google.protobuf.BoolValue include_unmapped = 3; + +} + +message FieldCollapse { + + // [required] The document field by which you want to group or collapse the search results + .google.protobuf.StringValue field = 1; + + // [optional] Expanding each group uses an additional query for each inner_hit request for every collapsed hit in the response. + repeated InnerHits inner_hits = 2; + + // [optional] Use to control the maximum number of concurrent searches allowed in this phase. + .google.protobuf.Int32Value max_concurrent_group_searches = 3; + + // [optional] Nested collapse within this collapse. + FieldCollapse collapse = 4; + +} + +message ScriptScoreQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. 
+ .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // Documents with a score lower than this floating point number are excluded from the search results. + .google.protobuf.FloatValue min_score = 3; + + QueryContainer query = 4; + + Script script = 5; + +} + +message ExistsQuery { + + // [optional] + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + // [optional] + // Query name for query tagging. + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // [required] + // Name of the field you wish to search. + .google.protobuf.StringValue field = 3; + +} + +message SimpleQueryStringQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // Analyzer used to convert text in the query string into tokens. + .google.protobuf.StringValue analyzer = 3; + + // If `true`, the query attempts to analyze wildcard terms in the query string. + .google.protobuf.BoolValue analyze_wildcard = 4; + + // If `true`, the parser creates a match_phrase query for each multi-position token. 
+ .google.protobuf.BoolValue auto_generate_synonyms_phrase_query = 5; + + Operator default_operator = 6; + enum Operator { + + OPERATOR_INVALID = 0; + OPERATOR_AND = 1; + OPERATOR_OR = 2; + } + + repeated string fields = 7; + + PipeSeparatedFlagsSimpleQueryStringFlag flags = 8; + + // Maximum number of terms to which the query expands for fuzzy matching. + .google.protobuf.Int32Value fuzzy_max_expansions = 9; + + // Number of beginning characters left unchanged for fuzzy matching. + .google.protobuf.Int32Value fuzzy_prefix_length = 10; + + // If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). + .google.protobuf.BoolValue fuzzy_transpositions = 11; + + // If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. + .google.protobuf.BoolValue lenient = 12; + + MinimumShouldMatch minimum_should_match = 13; + // Query .google.protobuf.StringValue in the simple query .google.protobuf.StringValue syntax you wish to parse and use for search. + .google.protobuf.StringValue query = 14; + + // Suffix appended to quoted text in the query string. + .google.protobuf.StringValue quote_field_suffix = 15; + +} + +message WildcardQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // Allows case insensitive matching of the pattern with the indexed field values when set to `true`. Default is `false` which means the case sensitivity of matching depends on the underlying field's mapping. 
+ .google.protobuf.BoolValue case_insensitive = 3; + + MultiTermQueryRewrite rewrite = 4; + enum MultiTermQueryRewrite { + + MULTI_TERM_QUERY_REWRITE_INVALID = 0; + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE = 1; + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE_BOOLEAN = 2; + MULTI_TERM_QUERY_REWRITE_SCORING_BOOLEAN = 3; + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_N = 4; + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BLENDED_FREQS_N = 5; + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BOOST_N = 6; + } + + // Wildcard pattern for terms you wish to find in the provided field. Required, when wildcard is not set. + .google.protobuf.StringValue value = 5; + + // Wildcard pattern for terms you wish to find in the provided field. Required, when value is not set. + .google.protobuf.StringValue wildcard = 6; + +} + +message PipeSeparatedFlagsSimpleQueryStringFlag { + + enum SimpleQueryStringFlag { + + SIMPLE_QUERY_STRING_FLAG_INVALID = 0; + SIMPLE_QUERY_STRING_FLAG_ALL = 1; + SIMPLE_QUERY_STRING_FLAG_AND = 2; + SIMPLE_QUERY_STRING_FLAG_ESCAPE = 3; + SIMPLE_QUERY_STRING_FLAG_FUZZY = 4; + SIMPLE_QUERY_STRING_FLAG_NEAR = 5; + SIMPLE_QUERY_STRING_FLAG_NONE = 6; + SIMPLE_QUERY_STRING_FLAG_NOT = 7; + SIMPLE_QUERY_STRING_FLAG_OR = 8; + SIMPLE_QUERY_STRING_FLAG_PHRASE = 9; + SIMPLE_QUERY_STRING_FLAG_PRECEDENCE = 10; + SIMPLE_QUERY_STRING_FLAG_PREFIX = 11; + SIMPLE_QUERY_STRING_FLAG_SLOP = 12; + SIMPLE_QUERY_STRING_FLAG_WHITESPACE = 13; + } + + oneof pipe_separated_flags_simple_query_string_flag{ + .google.protobuf.StringValue string_value = 1; + SimpleQueryStringFlag simple_query_string_flag = 2; + } + +} + +message KnnField { + // [optional] + // Query vector. Must have the same number of dimensions as the vector field you are searching against. + repeated float vector = 1; + + // [optional] + // The final number of nearest neighbors to return as top hits. + .google.protobuf.Int32Value k = 2; + + // [optional] + // The minimum similarity score for a neighbor to be considered a hit. 
+ .google.protobuf.FloatValue min_score = 3; + + // [optional] + // The maximum physical distance in vector space for a neighbor to be considered a hit. + .google.protobuf.FloatValue max_distance = 4; + + // [optional] + // Filters for the kNN search query. The kNN search will return the top k documents that also match this filter. If filter is not provided, all documents are allowed to match. + QueryContainer filter = 5; + + // [optional] + // Boost value to apply to kNN scores + .google.protobuf.FloatValue boost = 6; + + // [optional] + // Method parameters are dependent on the combination of engine and method used to create the index. + // Available method ef_search see https://opensearch.org/docs/latest/search-plugins/knn/approximate-knn/#ef_search and nprobes "https://opensearch.org/docs/latest/search-plugins/knn/approximate-knn/#nprobes" + map method_parameters = 7; + + // [optional] + // Available in version later than 2.17 + // To explicitly apply rescoring, provide the rescore parameter in a query on a quantized index and specify the oversample_factor "https://opensearch.org/docs/latest/search-plugins/knn/approximate-knn/#rescoring-quantized-results-using-full-precision" + map rescore = 8; + +} + +message MatchQueryTypeless { + oneof match_query_typeless { + // Standard match query for performing a full-text search, including options for fuzzy matching. + MatchQuery match_query = 1; + // Simplified match query syntax by combining the and query parameters + ObjectMap object_map = 2; + } +} + +message MatchQuery { + + // [optional] + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. 
+ .google.protobuf.FloatValue boost = 1; + + // [optional] + // Query name for query tagging + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // [optional] + // The analyzer used to tokenize the query string text. + // Default is the index-time analyzer specified for the default_field. If no analyzer is specified for the default_field, the analyzer is the default analyzer for the index. + .google.protobuf.StringValue analyzer = 3; + + // [optional] + // Specifies whether to create a match phrase query automatically for multi-term synonyms. + // For example, if you specify ba,batting average as synonyms and search for ba, OpenSearch searches for ba OR "batting average" (if this option is true) or ba OR (batting AND average) (if this option is false). + // Default is true. + .google.protobuf.BoolValue auto_generate_synonyms_phrase_query = 4; + + // [x-deprecated] + .google.protobuf.FloatValue cutoff_frequency = 5; + + // [optional] + // The number of character edits (insertions, deletions, substitutions, or transpositions) that it takes to change one word to another when determining whether a term matched a value. + // For example, the distance between wined and wind is 1. Valid values are non-negative integers or AUTO. + // The default, AUTO, chooses a value based on the length of each term and is a good choice for most use cases. + Fuzziness fuzziness = 6; + + // [optional] + // Determines how OpenSearch rewrites the query. + // Default is constant_score. + MultiTermQueryRewrite fuzzy_rewrite = 7; + enum MultiTermQueryRewrite { + + MULTI_TERM_QUERY_REWRITE_INVALID = 0; + // Uses the constant_score_boolean method for fewer matching terms. Otherwise, this method finds all matching terms in sequence and returns matching documents using a bit set. + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE = 1; + // Assigns each document a relevance score equal to the boost parameter. + // This method changes the original query to a bool query. 
This bool query contains a should clause and term query for each matching term. This method can cause the final bool query to exceed the clause limit in the indices.query.bool.max_clause_count setting. If the query exceeds this limit, opensearch returns an error. + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE_BOOLEAN = 2; + // Calculates a relevance score for each matching document. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. This method can cause the final bool query to exceed the clause limit in the indices.query.bool.max_clause_count setting. If the query exceeds this limit, Elasticsearch returns an error. + MULTI_TERM_QUERY_REWRITE_SCORING_BOOLEAN = 3; + // Calculates a relevance score for each matching document. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting. + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_N = 4; + // Calculates a relevance score for each matching document as if all terms had the same frequency. This frequency is the maximum frequency of all matching terms. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting. + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BLENDED_FREQS_N = 5; + // Calculates a relevance score for each matching document. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. 
The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting. + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BOOST_N = 6; + } + + // [optional] + // Setting fuzzy_transpositions to true (default) adds swaps of adjacent characters to the insert, delete, and substitute operations of the fuzziness option. + // For example, the distance between wind and wnid is 1 if fuzzy_transpositions is true (swap “n” and “i”) and 2 if it is false (delete “n”, insert “n”). If fuzzy_transpositions is false, rewind and wnid have the same distance (2) from wind, despite the more human-centric opinion that wnid is an obvious typo. + // The default(true) is a good choice for most use cases. + .google.protobuf.BoolValue fuzzy_transpositions = 8; + + // [optional] + // Setting lenient to true ignores data type mismatches between the query and the document field. + // For example, a query string of "8.2" could match a field of type float. + // Default is false. + .google.protobuf.BoolValue lenient = 9; + + // [optional] + // The maximum number of terms to which the query can expand. Fuzzy queries “expand to” a number of matching terms that are within the distance specified in fuzziness. Then OpenSearch tries to match those terms. + // Default is 50. + .google.protobuf.Int32Value max_expansions = 10; + + // [optional] + // If the query string contains multiple search terms and you use the or operator, the number of terms that need to match for the document to be considered a match. + // For example, if minimum_should_match is 2, wind often rising does not match The Wind Rises. If minimum_should_match is 1, it matches. + MinimumShouldMatch minimum_should_match = 11; + + // [optional] + // If the query string contains multiple search terms, whether all terms need to match (AND) or only one term needs to match (OR) for a document to be considered a match. + // Default is OR. 
+ Operator operator = 12; + enum Operator { + + OPERATOR_INVALID = 0; + // All terms need to match. The string `to be` is interpreted as `to AND be` + OPERATOR_AND = 1; + // Only one term needs to match. The string `to be` is interpreted as `to OR be` + OPERATOR_OR = 2; + } + + // [optional] + // The number of leading characters that are not considered in fuzziness. + // Default is 0. + .google.protobuf.Int32Value prefix_length = 13; + + message Query { + oneof query{ + // if the query value is string type. + .google.protobuf.StringValue string_value = 1; + // if the query value is number type. + GeneralNumber general_number = 2; + // if the query value is boolean type. + .google.protobuf.BoolValue bool_value = 3; + } + } + + // [required] + // The query string to use for search. + Query query = 14; + + // [optional] + // In some cases, the analyzer removes all terms from a query string. + // For example, the stop analyzer removes all terms from the string an but this. In those cases, zero_terms_query specifies whether to match no documents (none) or all documents (all). Valid values are none and all. + // Default is none. + ZeroTermsQuery zero_terms_query = 15; + + enum ZeroTermsQuery { + + ZERO_TERMS_QUERY_INVALID = 0; + // zero_terms_query specifies whether to match all documents (all). + ZERO_TERMS_QUERY_ALL = 1; + // zero_terms_query specifies whether to match no documents (none) + ZERO_TERMS_QUERY_NONE = 2; + } + +} + +message BoolQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // The clause (query) must appear in matching documents. However, unlike `must`, the score of the query will be ignored. 
+ repeated QueryContainer filter = 3; + + MinimumShouldMatch minimum_should_match = 4; + + // The clause (query) must appear in matching documents and will contribute to the score. + repeated QueryContainer must = 5; + + // The clause (query) must not appear in the matching documents. Because scoring is ignored, a score of `0` is returned for all documents. + repeated QueryContainer must_not = 6; + + // The clause (query) should appear in the matching document. + repeated QueryContainer should = 7; + +} + +message MinimumShouldMatch{ + oneof minimum_should_match{ + // if minimum_should_match is integer type. see "https://opensearch.org/docs/latest/query-dsl/minimum-should-match/#valid-values" + .google.protobuf.Int32Value int32_value = 1; + // if minimum_should_match is string type like percentage or combinations. see "https://opensearch.org/docs/latest/query-dsl/minimum-should-match/#valid-values" + .google.protobuf.StringValue string_value = 2; + } +} + +message BoostingQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // Floating point number between 0 and 1.0 used to decrease the relevance scores of documents matching the `negative` query. + .google.protobuf.FloatValue negative_boost = 3; + + QueryContainer negative = 4; + + QueryContainer positive = 5; + +} + +message ConstantScoreQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. 
+ .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + QueryContainer filter = 3; + +} + +message DisMaxQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // One or more query clauses. Returned documents must match one or more of these queries. If a document matches multiple queries, OpenSearch uses the highest relevance score. + repeated QueryContainer queries = 3; + + // Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses. + .google.protobuf.FloatValue tie_breaker = 4; + +} + +message FunctionScoreQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + FunctionBoostMode boost_mode = 3; + + enum FunctionBoostMode { + + FUNCTION_BOOST_MODE_INVALID = 0; + FUNCTION_BOOST_MODE_AVG = 1; + FUNCTION_BOOST_MODE_MAX = 2; + FUNCTION_BOOST_MODE_MIN = 3; + FUNCTION_BOOST_MODE_MULTIPLY = 4; + FUNCTION_BOOST_MODE_REPLACE = 5; + FUNCTION_BOOST_MODE_SUM = 6; + } + + // One or more functions that compute a new score for each document returned by the query. + repeated FunctionScoreContainer functions = 4; + + // Restricts the new score to not exceed the provided limit. + .google.protobuf.FloatValue max_boost = 5; + + // Excludes documents that do not meet the provided score threshold. 
+ .google.protobuf.FloatValue min_score = 6; + + QueryContainer query = 7; + + FunctionScoreMode score_mode = 8; + + enum FunctionScoreMode { + + FUNCTION_SCORE_MODE_INVALID = 0; + FUNCTION_SCORE_MODE_AVG = 1; + FUNCTION_SCORE_MODE_FIRST = 2; + FUNCTION_SCORE_MODE_MAX = 3; + FUNCTION_SCORE_MODE_MIN = 4; + FUNCTION_SCORE_MODE_MULTIPLY = 5; + FUNCTION_SCORE_MODE_SUM = 6; + } + +} + +message IntervalsAllOf { + + // An array of rules to combine. All rules must produce a match in a document for the overall source to match. + repeated IntervalsContainer intervals = 1; + + // Maximum number of positions between the matching terms. Intervals produced by the rules further apart than this are not considered matches. + .google.protobuf.Int32Value max_gaps = 2; + + // If `true`, intervals produced by the rules should appear in the order in which they are specified. + .google.protobuf.BoolValue ordered = 3; + + IntervalsFilter filter = 4; + +} + +message IntervalsAnyOf { + + // An array of rules to match. + repeated IntervalsContainer intervals = 1; + + IntervalsFilter filter = 2; + +} + +message IntervalsMatch { + + // Analyzer used to analyze terms in the query. + .google.protobuf.StringValue analyzer = 1; + + // Maximum number of positions between the matching terms. Terms further apart than this are not considered matches. + .google.protobuf.Int32Value max_gaps = 2; + + // If `true`, matching terms must appear in their specified order. + .google.protobuf.BoolValue ordered = 3; + + // Text you wish to find in the provided field. + .google.protobuf.StringValue query = 4; + + // Path to field or array of paths. Some API's support wildcards in the path to select multiple fields. + .google.protobuf.StringValue use_field = 5; + + IntervalsFilter filter = 6; + +} + +message IntervalsQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. 
A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + oneof intervals_query { + IntervalsAllOf all_of = 3; + + IntervalsAnyOf any_of = 4; + + IntervalsFuzzy fuzzy = 5; + + IntervalsMatch match = 6; + + IntervalsPrefix prefix = 7; + + IntervalsWildcard wildcard = 8; + } + +} + +message FunctionScoreContainer { + QueryContainer filter = 1; + + .google.protobuf.FloatValue weight = 2; + + //TODO: add decay function + oneof function_score_container { + + // Decay function not supported + // DecayFunction exp = 3; + // + // DecayFunction gauss = 4; + // + // DecayFunction linear = 5; + + FieldValueFactorScoreFunction field_value_factor = 6; + + RandomScoreFunction random_score = 7; + + ScriptScoreFunction script_score = 8; + } + +} + +message ScriptScoreFunction { + + Script script = 1; + +} + +message IntervalsFilter { + oneof intervals_filter { + IntervalsContainer after = 1; + + IntervalsContainer before = 2; + + IntervalsContainer contained_by = 3; + + IntervalsContainer containing = 4; + + IntervalsContainer not_contained_by = 5; + + IntervalsContainer not_containing = 6; + + IntervalsContainer not_overlapping = 7; + + IntervalsContainer overlapping = 8; + + Script script = 9; + } + +} + +message IntervalsContainer { + + oneof intervals_container { + IntervalsAllOf all_of = 1; + + IntervalsAnyOf any_of = 2; + + IntervalsFuzzy fuzzy = 3; + + IntervalsMatch match = 4; + + IntervalsPrefix prefix = 5; + + IntervalsWildcard wildcard = 6; + } + +} + +message PrefixQuery { + + // [optional] + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. 
+ .google.protobuf.FloatValue boost = 1; + + // [optional] + // Query name for query tagging + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // [optional] + // Determines how OpenSearch rewrites the query. + // Default is constant_score. + MultiTermQueryRewrite rewrite = 3; + enum MultiTermQueryRewrite { + + MULTI_TERM_QUERY_REWRITE_INVALID = 0; + // Uses the constant_score_boolean method for fewer matching terms. Otherwise, this method finds all matching terms in sequence and returns matching documents using a bit set. + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE = 1; + // Assigns each document a relevance score equal to the boost parameter. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. This method can cause the final bool query to exceed the clause limit in the indices.query.bool.max_clause_count setting. If the query exceeds this limit, opensearch returns an error. + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE_BOOLEAN = 2; + // Calculates a relevance score for each matching document. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. This method can cause the final bool query to exceed the clause limit in the indices.query.bool.max_clause_count setting. If the query exceeds this limit, Elasticsearch returns an error. + MULTI_TERM_QUERY_REWRITE_SCORING_BOOLEAN = 3; + // Calculates a relevance score for each matching document. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting. 
+ MULTI_TERM_QUERY_REWRITE_TOP_TERMS_N = 4; + // Calculates a relevance score for each matching document as if all terms had the same frequency. This frequency is the maximum frequency of all matching terms. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting. + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BLENDED_FREQS_N = 5; + // Calculates a relevance score for each matching document. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting. + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BOOST_N = 6; + } + + // [required] + // The term to search for in the field specified in . + .google.protobuf.StringValue value = 4; + + // [optional] + // Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. Default is `false` which means the case sensitivity of matching depends on the underlying field's mapping. + .google.protobuf.BoolValue case_insensitive = 5; + +} + +message TermsLookupFieldStringArrayMap { + oneof terms_lookup_field_string_array_map { + // terms_lookup_field terms you wish to find in the provided field + TermsLookupField terms_lookup_field = 1; + // string_array terms value you wish to find in the provided field + StringArray string_array = 2; + } +} + +message TermsQueryField { + // [optional] + // A floating-point value that specifies the weight of this field toward the relevance score. Values above 1.0 increase the field's relevance. 
Values between 0.0 and 1.0 decrease the field's relevance. + // Default is 1.0. + .google.protobuf.FloatValue boost = 1; + + // [required] + map<string, TermsLookupFieldStringArrayMap> terms_lookup_field_string_array_map = 2; +} + +message TermsLookupField { + + // [required] + // The name of the index from which to fetch field values + .google.protobuf.StringValue index = 1; + + // [required] + // The document ID of the document from which to fetch field values. + .google.protobuf.StringValue id = 2; + + // [required] + // The name of the field from which to fetch field values. Specify nested fields using dot path notation + .google.protobuf.StringValue path = 3; + + // [optional] + // Custom routing value of the document from which to fetch term values. If a custom routing value was provided when the document was indexed, this parameter is required. + repeated string routing = 4; +} + +message TermsSetQuery { + // [optional] + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + // [optional] + // Query name for query tagging + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // [optional] + // The name of the numeric field that specifies the number of matching terms required in order to return a document in the results. + .google.protobuf.StringValue minimum_should_match_field = 3; + + // [optional] + // A script that returns the number of matching terms required in order to return a document in the results. + Script minimum_should_match_script = 4; + + // [required] + // The array of terms to search for in the field specified in `field`. A document is returned in the results only if the required number of terms matches the document's field values exactly, with the correct spacing and capitalization. 
+ repeated string terms = 5; + +} + +message TermQueryFieldValue { + oneof term_query_field_value{ + TermQuery term_query = 1; + FieldValue field_value = 2; + } +} + +message TermQuery { + // [optional] + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + // [optional] + // Query name for query tagging + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // [required] + // Term you wish to find in the provided `field`. To return a document, the term must exactly match the field value, including whitespace and capitalization. + FieldValue value = 3; + + // [optional] + // Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. When `false`, the case sensitivity of matching depends on the underlying field's mapping. + .google.protobuf.BoolValue case_insensitive = 4; + +} + + +message QueryStringQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + .google.protobuf.BoolValue allow_leading_wildcard = 3; + + // Analyzer used to convert text in the query string into tokens. + .google.protobuf.StringValue analyzer = 4; + + // If `true`, the query attempts to analyze wildcard terms in the query string. + .google.protobuf.BoolValue analyze_wildcard = 5; + + // If `true`, match phrase queries are automatically created for multi-term synonyms. 
+ .google.protobuf.BoolValue auto_generate_synonyms_phrase_query = 6; + + // Path to field or array of paths. Some API's support wildcards in the path to select multiple fields. + .google.protobuf.StringValue default_field = 7; + + Operator default_operator = 8; + enum Operator { + + OPERATOR_INVALID = 0; + OPERATOR_AND = 1; + OPERATOR_OR = 2; + } + + // If `true`, enable position increments in queries constructed from a `query_string` search. + .google.protobuf.BoolValue enable_position_increments = 9; + + .google.protobuf.BoolValue escape = 10; + + repeated string fields = 11; + + Fuzziness fuzziness = 12; + + // Maximum number of terms to which the query expands for fuzzy matching. + .google.protobuf.Int32Value fuzzy_max_expansions = 13; + + // Number of beginning characters left unchanged for fuzzy matching. + .google.protobuf.Int32Value fuzzy_prefix_length = 14; + + MultiTermQueryRewrite fuzzy_rewrite = 15; + enum MultiTermQueryRewrite { + + MULTI_TERM_QUERY_REWRITE_INVALID = 0; + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE = 1; + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE_BOOLEAN = 2; + MULTI_TERM_QUERY_REWRITE_SCORING_BOOLEAN = 3; + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_N = 4; + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BLENDED_FREQS_N = 5; + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BOOST_N = 6; + } + + // If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). + .google.protobuf.BoolValue fuzzy_transpositions = 16; + + // If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. + .google.protobuf.BoolValue lenient = 17; + + // Maximum number of automaton states required for the query. + .google.protobuf.Int32Value max_determinized_states = 18; + + MinimumShouldMatch minimum_should_match = 19; + + // Maximum number of positions allowed between matching tokens for phrases. 
+ .google.protobuf.Int32Value phrase_slop = 20; + + // Query .google.protobuf.StringValue you wish to parse and use for search. + .google.protobuf.StringValue query = 21; + + // Analyzer used to convert quoted text in the query .google.protobuf.StringValue into tokens. For quoted text, this parameter overrides the analyzer specified in the `analyzer` parameter. + .google.protobuf.StringValue quote_analyzer = 22; + + // Suffix appended to quoted text in the query string. You can use this suffix to use a different analysis method for exact matches. + .google.protobuf.StringValue quote_field_suffix = 23; + + MultiTermQueryRewrite rewrite = 24; + + // How to combine the queries generated from the individual search terms in the resulting `dis_max` query. + .google.protobuf.FloatValue tie_breaker = 25; + + .google.protobuf.StringValue time_zone = 26; + + TextQueryType type = 27; + enum TextQueryType { + + TEXT_QUERY_TYPE_INVALID = 0; + TEXT_QUERY_TYPE_BEST_FIELDS = 1; + TEXT_QUERY_TYPE_BOOL_PREFIX = 2; + TEXT_QUERY_TYPE_CROSS_FIELDS = 3; + TEXT_QUERY_TYPE_MOST_FIELDS = 4; + TEXT_QUERY_TYPE_PHRASE = 5; + TEXT_QUERY_TYPE_PHRASE_PREFIX = 6; + } + +} + +message RandomScoreFunction { + + // Path to field or array of paths. Some API's support wildcards in the path to select multiple fields. + .google.protobuf.StringValue field = 1; + + message Seed { + oneof seed { + .google.protobuf.Int32Value int32_value = 1; + .google.protobuf.StringValue string_value = 2; + } + } + Seed seed = 2; +} + +// TODO: need to revisit RangeQuery def +message RangeQuery { + oneof range_query{ + DateRangeQuery date_range_query = 1; + NumberRangeQuery number_range_query = 2; + } +} + +message RegexpQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. 
+ .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`. When `false`, case sensitivity of matching depends on the underlying field's mapping. + .google.protobuf.BoolValue case_insensitive = 3; + + // Enables optional operators for the regular expression. + .google.protobuf.StringValue flags = 4; + + // Maximum number of automaton states required for the query. + .google.protobuf.Int32Value max_determinized_states = 5; + + MultiTermQueryRewrite rewrite = 6; + enum MultiTermQueryRewrite { + + MULTI_TERM_QUERY_REWRITE_INVALID = 0; + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE = 1; + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE_BOOLEAN = 2; + MULTI_TERM_QUERY_REWRITE_SCORING_BOOLEAN = 3; + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_N = 4; + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BLENDED_FREQS_N = 5; + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BOOST_N = 6; + } + + // Regular expression for terms you wish to find in the provided field. + .google.protobuf.StringValue value = 7; + +} + +message DateRangeQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. 
+ .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + RangeRelation relation = 3; + enum RangeRelation { + + RANGE_RELATION_INVALID = 0; + RANGE_RELATION_CONTAINS = 1; + RANGE_RELATION_INTERSECTS = 2; + RANGE_RELATION_WITHIN = 3; + } + + .google.protobuf.StringValue gt = 4; + + .google.protobuf.StringValue gte = 5; + + .google.protobuf.StringValue lt = 6; + + .google.protobuf.StringValue lte = 7; + + message From { + oneof from { + .google.protobuf.StringValue string_value = 1; + NullValue null_value = 2; + } + } + + From from = 8; + + message To { + oneof to { + .google.protobuf.StringValue string_value = 1; + NullValue null_value = 2; + } + } + To to = 9; + + .google.protobuf.StringValue format = 10; + + .google.protobuf.StringValue time_zone = 11; + +} + +message NumberRangeQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + RangeRelation relation = 3; + enum RangeRelation { + + RANGE_RELATION_INVALID = 0; + RANGE_RELATION_CONTAINS = 1; + RANGE_RELATION_INTERSECTS = 2; + RANGE_RELATION_WITHIN = 3; + } + + + // Greater than. + GeneralNumber gt = 4; + + // Greater than or equal to. + GeneralNumber gte = 5; + + // Less than. + GeneralNumber lt = 6; + + // Less than or equal to. 
+ GeneralNumber lte = 7; + + message From { + oneof from { + .google.protobuf.StringValue string_value = 1; + GeneralNumber general_number = 2; + NullValue null_value = 3; + } + } + From from = 8; + + message To { + oneof to { + .google.protobuf.StringValue string_value = 1; + GeneralNumber general_number = 2; + NullValue null_value = 3; + } + } + To to = 9; +} + +message FuzzyQuery { + + // [optional] + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + // [optional] + // Query name for query tagging + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // [optional] + // The maximum number of terms to which the query can expand. Fuzzy queries “expand to” a number of matching terms that are within the distance specified in fuzziness. Then OpenSearch tries to match those terms. Default is 50. + .google.protobuf.Int32Value max_expansions = 3; + + // [optional] + // The number of leading characters that are not considered in fuzziness. Default is 0. + .google.protobuf.Int32Value prefix_length = 4; + + // [optional] + // Determines how OpenSearch rewrites the query. + // Default is constant_score. + MultiTermQueryRewrite rewrite = 5; + enum MultiTermQueryRewrite { + + MULTI_TERM_QUERY_REWRITE_INVALID = 0; + // Uses the constant_score_boolean method for fewer matching terms. Otherwise, this method finds all matching terms in sequence and returns matching documents using a bit set. + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE = 1; + // Assigns each document a relevance score equal to the boost parameter. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. 
This method can cause the final bool query to exceed the clause limit in the indices.query.bool.max_clause_count setting. If the query exceeds this limit, OpenSearch returns an error.
+    MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE_BOOLEAN = 2;
+    // Calculates a relevance score for each matching document.
+    // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. This method can cause the final bool query to exceed the clause limit in the indices.query.bool.max_clause_count setting. If the query exceeds this limit, OpenSearch returns an error.
+    MULTI_TERM_QUERY_REWRITE_SCORING_BOOLEAN = 3;
+    // Calculates a relevance score for each matching document.
+    // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting.
+    MULTI_TERM_QUERY_REWRITE_TOP_TERMS_N = 4;
+    // Calculates a relevance score for each matching document as if all terms had the same frequency. This frequency is the maximum frequency of all matching terms.
+    // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting.
+    MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BLENDED_FREQS_N = 5;
+    // Calculates a relevance score for each matching document.
+    // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms.
You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting. + MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BOOST_N = 6; + } + // [optional] + // Specifies whether to allow transpositions of two adjacent characters (ab to ba) as edits. Default is true. + .google.protobuf.BoolValue transpositions = 6; + // [optional] + // The number of character edits (insert, delete, substitute) needed to change one word to another when determining whether a term matched a value. + Fuzziness fuzziness = 7; + + message Value { + oneof value { + .google.protobuf.StringValue string_value = 1; + .google.protobuf.BoolValue bool_value = 2; + GeneralNumber general_number = 3; + } + } + + // [required] + // Term you wish to find in the provided . + Value value = 8; +} + +message Fuzziness{ + + oneof fuzziness{ + // AUTO: Generates an edit distance based on the length of the term. Low and high distance arguments may be optionally provided AUTO:[low],[high]. AUTO should generally be the preferred value for fuzziness. + .google.protobuf.StringValue string_value = 1; + // 0,1,2: The maximum allowed Levenshtein Edit Distance (or number of edits) + .google.protobuf.Int32Value int32_value = 2; + } + +} + +message FieldValue { + oneof type{ + GeneralNumber general_number = 1; + .google.protobuf.StringValue string_value = 2; + ObjectMap object_map = 3; + .google.protobuf.BoolValue bool_value = 4; + } +} + +message FieldValueResponse { + oneof value{ + .google.protobuf.DoubleValue double_value = 1; + .google.protobuf.StringValue string_value = 2; + .google.protobuf.Struct object = 3; + .google.protobuf.BoolValue bool_value = 4; + } +} + +message IdsQuery { + + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + repeated string values = 3; + +} + +message IntervalsFuzzy { + + // Analyzer used to normalize the term. 
+ .google.protobuf.StringValue analyzer = 1; + + Fuzziness fuzziness = 2; + + // Number of beginning characters left unchanged when creating expansions. + .google.protobuf.Int32Value prefix_length = 3; + + // The term to match. + .google.protobuf.StringValue term = 4; + + // Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`). + .google.protobuf.BoolValue transpositions = 5; + + // Path to field or array of paths. Some API's support wildcards in the path to select multiple fields. + .google.protobuf.StringValue use_field = 6; + +} + +message IntervalsPrefix { + + // Analyzer used to analyze the `prefix`. + .google.protobuf.StringValue analyzer = 1; + + // Beginning characters of terms you wish to find in the top-level field. + .google.protobuf.StringValue prefix = 2; + + // Path to field or array of paths. Some API's support wildcards in the path to select multiple fields. + .google.protobuf.StringValue use_field = 3; + +} + +message IntervalsWildcard { + + // Analyzer used to analyze the `pattern`. Defaults to the top-level field's analyzer. + .google.protobuf.StringValue analyzer = 1; + + // Wildcard pattern used to find matching terms. + .google.protobuf.StringValue pattern = 2; + + // Path to field or array of paths. Some API's support wildcards in the path to select multiple fields. + .google.protobuf.StringValue use_field = 3; + +} + +message MatchAllQuery { + + // [optional] + // Boosts the clause by the given multiplier. Useful for weighing clauses in compound queries. Values in the [0, 1) range decrease relevance, and values greater than 1 increase relevance. Default is 1. + .google.protobuf.FloatValue boost = 1; + + // [optional] + // Query name for query tagging + .google.protobuf.StringValue name = 2 [json_name = "_name"]; +} + +message MatchBoolPrefixQuery { + // [optional] + // Floating point number used to decrease or increase the relevance scores of the query. 
Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + // [optional] + // Query name for query tagging + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // [optional] + // The analyzer used to tokenize the query string text. + // Default is the index-time analyzer specified for the default_field. If no analyzer is specified for the default_field, the analyzer is the default analyzer for the index. + .google.protobuf.StringValue analyzer = 3; + + // [optional] + // The number of character edits (insert, delete, substitute) that it takes to change one word to another when determining whether a term matched a value. + // For example, the distance between wined and wind is 1. + // The default, AUTO, chooses a value based on the length of each term and is a good choice for most use cases. + Fuzziness fuzziness = 4; + + // [optional] + // Determines how OpenSearch rewrites the query. + // Default is constant_score. + MultiTermQueryRewrite fuzzy_rewrite = 5; + enum MultiTermQueryRewrite { + + MULTI_TERM_QUERY_REWRITE_INVALID = 0; + // Uses the constant_score_boolean method for fewer matching terms. Otherwise, this method finds all matching terms in sequence and returns matching documents using a bit set. + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE = 1; + // Assigns each document a relevance score equal to the boost parameter. + // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. This method can cause the final bool query to exceed the clause limit in the indices.query.bool.max_clause_count setting. If the query exceeds this limit, opensearch returns an error. + MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE_BOOLEAN = 2; + // Calculates a relevance score for each matching document. 
+    // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. This method can cause the final bool query to exceed the clause limit in the indices.query.bool.max_clause_count setting. If the query exceeds this limit, OpenSearch returns an error.
+    MULTI_TERM_QUERY_REWRITE_SCORING_BOOLEAN = 3;
+    // Calculates a relevance score for each matching document.
+    // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting.
+    MULTI_TERM_QUERY_REWRITE_TOP_TERMS_N = 4;
+    // Calculates a relevance score for each matching document as if all terms had the same frequency. This frequency is the maximum frequency of all matching terms.
+    // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting.
+    MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BLENDED_FREQS_N = 5;
+    // Calculates a relevance score for each matching document.
+    // This method changes the original query to a bool query. This bool query contains a should clause and term query for each matching term. The final bool query only includes term queries for the top N scoring terms. You can use this method to avoid exceeding the clause limit in the indices.query.bool.max_clause_count setting.
+    MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BOOST_N = 6;
+  }
+
+  // [optional]
+  // Setting fuzzy_transpositions to true (default) adds swaps of adjacent characters to the insert, delete, and substitute operations of the fuzziness option.
+ // For example, the distance between wind and wnid is 1 if fuzzy_transpositions is true (swap “n” and “i”) and 2 if it is false (delete “n”, insert “n”). If fuzzy_transpositions is false, rewind and wnid have the same distance (2) from wind, despite the more human-centric opinion that wnid is an obvious typo. + // The default(true) is a good choice for most use cases. + .google.protobuf.BoolValue fuzzy_transpositions = 6; + + // [optional] + // The maximum number of terms to which the query can expand. Fuzzy queries “expand to” a number of matching terms that are within the distance specified in fuzziness. Then OpenSearch tries to match those terms. + // Default is 50. + .google.protobuf.Int32Value max_expansions = 7; + + // [optional] + // If the query string contains multiple search terms and you use the or operator, the number of terms that need to match for the document to be considered a match. + // For example, if minimum_should_match is 2, wind often rising does not match The Wind Rises. If minimum_should_match is 1, it matches. + MinimumShouldMatch minimum_should_match = 8; + + // [optional] + // If the query string contains multiple search terms, whether all terms need to match (and) or only one term needs to match (or) for a document to be considered a match. + // Default is or. + Operator operator = 9; + enum Operator { + + OPERATOR_INVALID = 0; + // All terms need to match. The string `to be` is interpreted as `to AND be` + OPERATOR_AND = 1; + // Only one term needs to match. The string `to be` is interpreted as `to OR be` + OPERATOR_OR = 2; + } + + // [optional] + // The number of leading characters that are not considered in fuzziness. + // Default is 0. + .google.protobuf.Int32Value prefix_length = 10; + + // [required] + // Terms you wish to find in the provided field. The last term is used in a prefix query. 
+  .google.protobuf.StringValue query = 11;
+
+}
+
+message MatchNoneQuery {
+
+  .google.protobuf.FloatValue boost = 1;
+
+  .google.protobuf.StringValue name = 2 [json_name = "_name"];
+
+  // (Fixed: field previously ended in a doubled semicolon `;;`, which is an empty
+  // statement and rejected by protoc.)
+  ObjectMap object = 3;
+}
+
+message MatchPhrasePrefixQuery {
+  // [optional]
+  // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score.
+  .google.protobuf.FloatValue boost = 1;
+
+  // [optional]
+  // Query name for query tagging
+  .google.protobuf.StringValue name = 2 [json_name = "_name"];
+
+  // [optional]
+  // The analyzer used to tokenize the query string text.
+  // Default is the index-time analyzer specified for the default_field. If no analyzer is specified for the default_field, the analyzer is the default analyzer for the index.
+  .google.protobuf.StringValue analyzer = 3;
+
+  // [optional]
+  // The maximum number of terms to which the query can expand. Fuzzy queries “expand to” a number of matching terms that are within the distance specified in fuzziness. Then OpenSearch tries to match those terms.
+  // Default is 50.
+  .google.protobuf.Int32Value max_expansions = 4;
+
+  // [required]
+  // The query string to use for search.
+  .google.protobuf.StringValue query = 5;
+
+  // [optional]
+  // Controls the degree to which words in a query can be misordered and still be considered a match. From the Lucene documentation: “The number of other words permitted between words in query phrase.
+  // For example, to switch the order of two words requires two moves (the first move places the words atop one another), so to permit reorderings of phrases, the slop must be at least two. A value of zero requires an exact match.”
+  .google.protobuf.Int32Value slop = 6;
+
+  // [optional]
+  // In some cases, the analyzer removes all terms from a query string.
For example, the stop analyzer removes all terms from the string an but this. In those cases, zero_terms_query specifies whether to match no documents (none) or all documents (all). Valid values are none and all. + // Default is none. + ZeroTermsQuery zero_terms_query = 7; + enum ZeroTermsQuery { + + ZERO_TERMS_QUERY_INVALID = 0; + // zero_terms_query specifies whether to match all documents (all). + ZERO_TERMS_QUERY_ALL = 1; + // zero_terms_query specifies whether to match no documents (none) + ZERO_TERMS_QUERY_NONE = 2; + } + +} + +message MatchPhraseQuery { + + // [optional] + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + // [optional] + // Query name for query tagging + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // [optional] + // The analyzer used to tokenize the query string text. + // Default is the index-time analyzer specified for the default_field. If no analyzer is specified for the default_field, the analyzer is the default analyzer for the index. + .google.protobuf.StringValue analyzer = 3; + + // [required] + // The query string to use for search. + .google.protobuf.StringValue query = 4; + + // [optional] + // Controls the degree to which words in a query can be misordered and still be considered a match. From the Lucene documentation: “The number of other words permitted between words in query phrase. For example, to switch the order of two words requires two moves (the first move places the words atop one another), so to permit reorderings of phrases, the slop must be at least two. A value of zero requires an exact match.” + .google.protobuf.Int32Value slop = 5; + + // [optional] + // In some cases, the analyzer removes all terms from a query string. 
For example, the stop analyzer removes all terms from the string an but this. In those cases, zero_terms_query specifies whether to match no documents (none) or all documents (all). Valid values are none and all. + // Default is none. + ZeroTermsQuery zero_terms_query = 6; + enum ZeroTermsQuery { + + ZERO_TERMS_QUERY_INVALID = 0; + // zero_terms_query specifies whether to match all documents (all). + ZERO_TERMS_QUERY_ALL = 1; + // zero_terms_query specifies whether to match no documents (none) + ZERO_TERMS_QUERY_NONE = 2; + } + +} + +message MultiMatchQuery { + + // Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. + .google.protobuf.FloatValue boost = 1; + + .google.protobuf.StringValue name = 2 [json_name = "_name"]; + + // Analyzer used to convert the text in the query value into tokens. + .google.protobuf.StringValue analyzer = 3; + + // If `true`, match phrase queries are automatically created for multi-term synonyms. + .google.protobuf.BoolValue auto_generate_synonyms_phrase_query = 4; + + .google.protobuf.FloatValue cutoff_frequency = 5; + + repeated string fields = 6; + + Fuzziness fuzziness = 7; + + .google.protobuf.StringValue fuzzy_rewrite = 8; + + // If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). Can be applied to the term subqueries constructed for all terms but the final term. 
+  // (Fixed: this field's own doc comment describes a boolean toggle — "If `true`,
+  // edits for fuzzy matching include transpositions…" — but it was declared with the
+  // MultiTermQueryRewrite enum type, almost certainly a copy-paste slip from the
+  // rewrite fields. Declared as BoolValue to match the documented contract.)
+  .google.protobuf.BoolValue fuzzy_transpositions = 9;
+  // NOTE(review): enum currently unreferenced in this message — `fuzzy_rewrite`
+  // above is string-typed; confirm whether it should use this enum instead.
+  enum MultiTermQueryRewrite {
+
+    MULTI_TERM_QUERY_REWRITE_INVALID = 0;
+    MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE = 1;
+    MULTI_TERM_QUERY_REWRITE_CONSTANT_SCORE_BOOLEAN = 2;
+    MULTI_TERM_QUERY_REWRITE_SCORING_BOOLEAN = 3;
+    MULTI_TERM_QUERY_REWRITE_TOP_TERMS_N = 4;
+    MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BLENDED_FREQS_N = 5;
+    MULTI_TERM_QUERY_REWRITE_TOP_TERMS_BOOST_N = 6;
+  }
+
+  // If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored.
+  .google.protobuf.BoolValue lenient = 10;
+
+  // Maximum number of terms to which the query will expand.
+  .google.protobuf.Int32Value max_expansions = 11;
+
+  MinimumShouldMatch minimum_should_match = 12;
+
+  Operator operator = 13;
+  enum Operator {
+
+    OPERATOR_INVALID = 0;
+    OPERATOR_AND = 1;
+    OPERATOR_OR = 2;
+  }
+
+  // Number of beginning characters left unchanged for fuzzy matching.
+  .google.protobuf.Int32Value prefix_length = 14;
+
+  // Text, number, boolean value or date you wish to find in the provided field.
+  .google.protobuf.StringValue query = 15;
+
+  // Maximum number of positions allowed between matching tokens.
+  .google.protobuf.Int32Value slop = 16;
+
+  // Determines how scores for each per-term blended query and scores across groups are combined.
+  .google.protobuf.FloatValue tie_breaker = 17;
+
+  TextQueryType type = 18;
+  enum TextQueryType {
+
+    TEXT_QUERY_TYPE_INVALID = 0;
+    TEXT_QUERY_TYPE_BEST_FIELDS = 1;
+    TEXT_QUERY_TYPE_BOOL_PREFIX = 2;
+    TEXT_QUERY_TYPE_CROSS_FIELDS = 3;
+    TEXT_QUERY_TYPE_MOST_FIELDS = 4;
+    TEXT_QUERY_TYPE_PHRASE = 5;
+    TEXT_QUERY_TYPE_PHRASE_PREFIX = 6;
+  }
+
+  ZeroTermsQuery zero_terms_query = 19;
+  enum ZeroTermsQuery {
+
+    ZERO_TERMS_QUERY_INVALID = 0;
+    ZERO_TERMS_QUERY_ALL = 1;
+    ZERO_TERMS_QUERY_NONE = 2;
+  }
+
+}
+
+message FieldValueFactorScoreFunction {
+
+  // Path to field or array of paths. Some API's support wildcards in the path to select multiple fields.
+ .google.protobuf.StringValue field = 1; + + // Optional factor to multiply the field value with. + .google.protobuf.FloatValue factor = 2; + + // Value used if the document doesn't have that field. The modifier and factor are still applied to it as though it were read from the document. + .google.protobuf.DoubleValue missing = 3; + + FieldValueFactorModifier modifier = 4; + + enum FieldValueFactorModifier { + + FIELD_VALUE_FACTOR_MODIFIER_INVALID = 0; + FIELD_VALUE_FACTOR_MODIFIER_LN = 1; + FIELD_VALUE_FACTOR_MODIFIER_LN1P = 2; + FIELD_VALUE_FACTOR_MODIFIER_LN2P = 3; + FIELD_VALUE_FACTOR_MODIFIER_LOG = 4; + FIELD_VALUE_FACTOR_MODIFIER_LOG1P = 5; + FIELD_VALUE_FACTOR_MODIFIER_LOG2P = 6; + FIELD_VALUE_FACTOR_MODIFIER_NONE = 7; + FIELD_VALUE_FACTOR_MODIFIER_RECIPROCAL = 8; + FIELD_VALUE_FACTOR_MODIFIER_SQRT = 9; + FIELD_VALUE_FACTOR_MODIFIER_SQUARE = 10; + } + +} + +message DutchAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_DUTCH = 1; + } + Type type = 1; + + repeated string stopwords = 2; +} + +message FingerprintAnalyzer { + enum Type { + + TYPE_INVALID = 0; + TYPE_FINGERPRINT = 1; + } + Type type = 1; + + .google.protobuf.StringValue version = 2; + + .google.protobuf.FloatValue max_output_size = 3; + + .google.protobuf.BoolValue preserve_original = 4; + + .google.protobuf.StringValue separator = 5; + + // Language value, such as _arabic_ or _thai_. Defaults to _english_. Each language value corresponds to a predefined list of stop words in Lucene. See Stop words by language for supported language values and their stop words. Also accepts an array of stop words. 
+ repeated string stopwords = 6; + + .google.protobuf.StringValue stopwords_path = 7; +} + +message IcuAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_ICU_ANALYZER = 1; + } + Type type = 1; + + enum IcuNormalizationType { + + ICU_NORMALIZATION_TYPE_INVALID = 0; + ICU_NORMALIZATION_TYPE_NFC = 1; + ICU_NORMALIZATION_TYPE_NFKC = 2; + ICU_NORMALIZATION_TYPE_NFKC_CF = 3; + } + IcuNormalizationType method = 2; + + enum IcuNormalizationMode { + + ICU_NORMALIZATION_MODE_INVALID = 0; + ICU_NORMALIZATION_MODE_COMPOSE = 1; + ICU_NORMALIZATION_MODE_DECOMPOSE = 2; + } + + IcuNormalizationMode mode = 3; +} + +//manual generate +message KeywordAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_KEYWORD = 1; + } + Type type = 1; + + .google.protobuf.StringValue version = 2; +} + +message KuromojiAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_KUROMOJI = 1; + } + Type type = 1; + + + enum Mode { + + MODE_INVALID = 0; + MODE_EXTENDED = 1; + MODE_NORMAL = 2; + MODE_SEARCH = 3; + } + + Mode mode = 2; + + .google.protobuf.StringValue user_dictionary = 3; +} + + +message LanguageAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_LANGUAGE = 1; + } + Type type = 1; + + .google.protobuf.StringValue version = 2; + + enum Language { + + LANGUAGE_INVALID = 0; + LANGUAGE_ARABIC = 1; + LANGUAGE_ARMENIAN = 2; + LANGUAGE_BASQUE = 3; + LANGUAGE_BRAZILIAN = 4; + LANGUAGE_BULGARIAN = 5; + LANGUAGE_CATALAN = 6; + LANGUAGE_CHINESE = 7; + LANGUAGE_CJK = 8; + LANGUAGE_CZECH = 9; + LANGUAGE_DANISH = 10; + LANGUAGE_DUTCH = 11; + LANGUAGE_ENGLISH = 12; + LANGUAGE_ESTONIAN = 13; + LANGUAGE_FINNISH = 14; + LANGUAGE_FRENCH = 15; + LANGUAGE_GALICIAN = 16; + LANGUAGE_GERMAN = 17; + LANGUAGE_GREEK = 18; + LANGUAGE_HINDI = 19; + LANGUAGE_HUNGARIAN = 20; + LANGUAGE_INDONESIAN = 21; + LANGUAGE_IRISH = 22; + LANGUAGE_ITALIAN = 23; + LANGUAGE_LATVIAN = 24; + LANGUAGE_NORWEGIAN = 25; + LANGUAGE_PERSIAN = 26; + LANGUAGE_PORTUGUESE = 27; + LANGUAGE_ROMANIAN = 28; + LANGUAGE_RUSSIAN = 29; + LANGUAGE_SORANI = 
30; + LANGUAGE_SPANISH = 31; + LANGUAGE_SWEDISH = 32; + LANGUAGE_THAI = 33; + LANGUAGE_TURKISH = 34; + } + + Language language = 3; + + repeated string stem_exclusion = 4; + + // Language value, such as _arabic_ or _thai_. Defaults to _english_. Each language value corresponds to a predefined list of stop words in Lucene. See Stop words by language for supported language values and their stop words. Also accepts an array of stop words. + repeated string stopwords = 5; + + .google.protobuf.StringValue stopwords_path = 6; + +} + +message NoriAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_NORI = 1; + } + Type type = 1; + + .google.protobuf.StringValue version = 2; + + enum NoriDecompoundMode { + + NORI_DECOMPOUND_MODE_INVALID = 0; + NORI_DECOMPOUND_MODE_NONE = 1; + NORI_DECOMPOUND_MODE_DISCARD = 2; + NORI_DECOMPOUND_MODE_MIXED = 3; + } + + NoriDecompoundMode decompound_mode = 3; + + repeated string stoptags = 4; + + .google.protobuf.StringValue user_dictionary = 5; +} + +message PatternAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_PATTERN = 1; + } + Type type = 1; + + .google.protobuf.StringValue version = 2; + + .google.protobuf.StringValue flags = 3; + + .google.protobuf.BoolValue lowercase = 4; + + .google.protobuf.StringValue pattern = 5; + + repeated string stopwords = 6; + +} + +message SimpleAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_SIMPLE = 1; + } + Type type = 1; + + .google.protobuf.StringValue version = 2; +} + +message SnowballAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_SNOWBALL = 1; + } + Type type = 1; + + .google.protobuf.StringValue version = 2; + + enum SnowballLanguage { + + SNOWBALL_LANGUAGE_INVALID = 0; + SNOWBALL_LANGUAGE_ARMENIAN = 1; + SNOWBALL_LANGUAGE_BASQUE = 2; + SNOWBALL_LANGUAGE_CATALAN = 3; + SNOWBALL_LANGUAGE_DANISH = 4; + SNOWBALL_LANGUAGE_DUTCH = 5; + SNOWBALL_LANGUAGE_ENGLISH = 6; + SNOWBALL_LANGUAGE_FINNISH = 7; + SNOWBALL_LANGUAGE_FRENCH = 8; + SNOWBALL_LANGUAGE_GERMAN = 9; + 
SNOWBALL_LANGUAGE_GERMAN2 = 10; + SNOWBALL_LANGUAGE_HUNGARIAN = 11; + SNOWBALL_LANGUAGE_ITALIAN = 12; + SNOWBALL_LANGUAGE_KP = 13; + SNOWBALL_LANGUAGE_LOVINS = 14; + SNOWBALL_LANGUAGE_NORWEGIAN = 15; + SNOWBALL_LANGUAGE_PORTER = 16; + SNOWBALL_LANGUAGE_PORTUGUESE = 17; + SNOWBALL_LANGUAGE_ROMANIAN = 18; + SNOWBALL_LANGUAGE_RUSSIAN = 19; + SNOWBALL_LANGUAGE_SPANISH = 20; + SNOWBALL_LANGUAGE_SWEDISH = 21; + SNOWBALL_LANGUAGE_TURKISH = 22; + } + + SnowballLanguage language = 3; + + repeated string stopwords = 4; +} + +message StandardAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_STANDARD = 1; + } + Type type = 1; + + .google.protobuf.FloatValue max_token_length = 2; + repeated string stopwords = 3; +} + +message StopAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_STOP = 1; + } + Type type = 1; + + .google.protobuf.StringValue version = 2; + + repeated string stopwords = 3; + + .google.protobuf.StringValue stopwords_path = 4; +} + +message WhitespaceAnalyzer{ + enum Type { + + TYPE_INVALID = 0; + TYPE_WHITESPACE = 1; + } + Type type = 1; + + .google.protobuf.StringValue version = 2; + +} + +message CustomAnalyzer { + enum Type { + + TYPE_INVALID = 0; + TYPE_CUSTOM = 1; + } + + Type type = 1; + + repeated string char_filter = 2; + + repeated string filter = 3; + + .google.protobuf.FloatValue position_increment_gap = 4; + + .google.protobuf.FloatValue position_offset_gap = 5; + + .google.protobuf.StringValue tokenizer = 6; +} + +message Analyzer { + oneof analyzer { + CustomAnalyzer custom_analyzer = 1; + FingerprintAnalyzer fingerprint_analyzer = 2; + KeywordAnalyzer keyword_analyzer = 3; + LanguageAnalyzer language_analyzer = 4; + NoriAnalyzer nori_analyzer = 5; + PatternAnalyzer pattern_analyzer = 6; + SimpleAnalyzer simple_analyzer = 7; + StandardAnalyzer standard_analyzer = 8; + StopAnalyzer stop_analyzer = 9; + WhitespaceAnalyzer whitespace_analyzer = 10; + IcuAnalyzer icu_analyzer = 11; + KuromojiAnalyzer kuromoji_analyzer = 12; + SnowballAnalyzer 
snowball_analyzer = 13; + DutchAnalyzer dutch_analyzer = 14; + } +} + +message OpenSearchException { + oneof version { + OpenSearchExceptionV1 v1 = 1; + } +} + + +message OpenSearchExceptionV1 { + .google.protobuf.StringValue type = 1; + .google.protobuf.StringValue reason = 2; + repeated .google.protobuf.Struct root_cause = 3; + + .google.protobuf.Struct caused_by = 4; + .google.protobuf.StringValue stack_trace = 5; + .google.protobuf.Struct suppressed = 6; + + .google.protobuf.Struct additional_details = 7; +} + +message InlineGet { + + .google.protobuf.Struct fields = 1; + + .google.protobuf.BoolValue found = 2; + + .google.protobuf.Int64Value seq_no = 3 [json_name = "_seq_no"]; + + .google.protobuf.Int64Value primary_term = 4 [json_name = "_primary_term"]; + + repeated string routing = 5 [json_name = "_routing"]; + + .google.protobuf.Struct source = 6 [json_name = "_source"]; + +} diff --git a/server/src/main/proto/spec/document.proto b/server/src/main/proto/spec/document.proto new file mode 100644 index 0000000000000..e1aac9011b5e8 --- /dev/null +++ b/server/src/main/proto/spec/document.proto @@ -0,0 +1,1015 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "opensearch.protos"; +option java_outer_classname = "DocumentProto"; +option go_package = "opensearchpb"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/wrappers.proto"; +import "spec/common.proto"; + +// The bulk operation lets you add, update, or delete multiple documents in a single request, index name needs to be specified in `BulkRequestBody` +message BulkRequest { + // [optional] Set `true` or `false` to return the `_source` field or not, or a list of fields to return. + SourceConfigParam source = 1 [json_name = "_source"]; + // [optional] A list of source fields to exclude from the response. + repeated string source_excludes = 2; + // [optional] A list of source fields to include in the response. 
+  // NOTE(review): `_source_includes` is not yet supported; the original
+  // commented-out declaration was malformed (unbalanced `[` and stray `,;`).
+  // Kept here in valid form so field number 3 is visibly spoken for:
+  // repeated string source_includes = 3 [json_name = "_source_includes"];
+  // [optional] ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
+  .google.protobuf.StringValue pipeline = 4;
+
+  enum Refresh {
+
+    REFRESH_INVALID = 0;
+    // `REFRESH_FALSE` do nothing with refreshes.
+    REFRESH_FALSE = 1;
+    // `REFRESH_TRUE` makes the changes show up in search results immediately, but hurts cluster performance.
+    REFRESH_TRUE = 2;
+    // `REFRESH_WAIT_FOR` waits for a refresh. Requests take longer to return, but cluster performance doesn't suffer.
+    REFRESH_WAIT_FOR = 3;
+  }
+  // [optional] The enum of whether to refresh the affected shards after performing the indexing operations. Default is false
+  Refresh refresh = 5;
+  // [optional] If `true`, the request's actions must target an index alias. Defaults to false.
+  .google.protobuf.BoolValue require_alias = 6;
+  // [optional] Custom value used to route operations to a specific shard.
+  repeated string routing = 7;
+  // [optional] Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.
+  // pattern: ^([0-9\.]+)(?:d|h|m|s|ms|micros|nanos)$
+  // Defaults to 1m (one minute). This guarantees OpenSearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur.
+  .google.protobuf.StringValue timeout = 8;
+  // [deprecated] The default document type for documents that don't specify a type. Default is _doc. We highly recommend ignoring this parameter and using a type of _doc for all indexes.
+  // Marked deprecated in the descriptor as well so generated clients warn on use.
+  .google.protobuf.StringValue type = 9 [deprecated = true];
+  // [optional] The number of active shards that must be available before OpenSearch processes the request.
+  // Default is 1 (only the primary shard). Set to all or a positive integer. Values greater than 1 require replicas. For example, if you specify a value of 3, the index must have two replicas distributed across two additional nodes for the operation to succeed.
+  WaitForActiveShards wait_for_active_shards = 10;
+  // [required] The request body contains create, delete, index, and update actions and their associated source data
+  repeated BulkRequestBody request_body = 11;
+}
+
+// The index bulk operation lets you add, update, or delete multiple documents in a single request.
+message IndexBulkRequest {
+  // [required] The data stream, index, or index alias to perform the action on
+  .google.protobuf.StringValue index = 1;
+  // [optional] Set `true` or `false` to return the `_source` field or not, or a list of fields to return.
+  SourceConfigParam source = 2 [json_name = "_source"];
+  // [optional] A list of source fields to exclude from the response.
+  repeated string source_excludes = 3;
+  // [optional] A list of source fields to include in the response.
+  // NOTE(review): `_source_includes` is not yet supported; the original
+  // commented-out declaration was malformed (unbalanced `[` and stray `,;`).
+  // Kept here in valid form so field number 4 is visibly spoken for:
+  // repeated string source_includes = 4 [json_name = "_source_includes"];
+  // [optional] ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
+  .google.protobuf.StringValue pipeline = 5;
+
+  enum Refresh {
+
+    REFRESH_INVALID = 0;
+    // `REFRESH_FALSE` do nothing with refreshes.
+    REFRESH_FALSE = 1;
+    // `REFRESH_TRUE` makes the changes show up in search results immediately, but hurts cluster performance.
+    REFRESH_TRUE = 2;
+    // `REFRESH_WAIT_FOR` waits for a refresh. Requests take longer to return, but cluster performance doesn't suffer.
+    REFRESH_WAIT_FOR = 3;
+  }
+  // [optional] The enum of whether to refresh the affected shards after performing the indexing operations.
Default is false + Refresh refresh = 6; + // [optional] If `true`, the request's actions must target an index alias. Defaults to false. + .google.protobuf.BoolValue require_alias = 7; + // [optional] Custom value used to route operations to a specific shard. + repeated string routing = 8; + // [optional] Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + // pattern: ^([0-9\.]+)(?:d|h|m|s|ms|micros|nanos)$ + // Defaults to 1m (one minute). This guarantees OpenSearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. + .google.protobuf.StringValue timeout = 9; + // [deprecated] The default document type for documents that don't specify a type. Default is _doc. We highly recommend ignoring this parameter and using a type of _doc for all indexes. + .google.protobuf.StringValue type = 10; + // [optional] The number of active shards that must be available before OpenSearch processes the request. Default is 1 (only the primary shard). Set to all or a positive integer. Values greater than 1 require replicas. For example, if you specify a value of 3, the index must have two replicas distributed across two additional nodes for the operation to succeed. + WaitForActiveShards wait_for_active_shards = 11; + // [required] The request body contains create, delete, index, and update actions and their associated source data + repeated BulkRequestBody request_body = 12; +} + +message BulkRequestBody { + + // [required] operation to perform (index, create, update, or delete) + oneof operation_container { + // Indexes the specified document. If the document exists, replaces the document and increments the version. It must followed with source data to be indexed in `doc` field. + IndexOperation index = 1; + // Indexes the specified document if it does not already exist. 
+    // It must be followed by the source data to be indexed in the `object` field.
+    CreateOperation create = 2;
+    // Performs a partial document update. It must be followed by the partial document and update options in the `doc` field.
+    UpdateOperation update = 3;
+    // Removes the specified document from the index.
+    DeleteOperation delete = 4;
+  }
+
+  // [optional] Set to false to disable setting 'result' in the response to 'noop' if no change to the document occurred.
+  .google.protobuf.BoolValue detect_noop = 5;
+
+  // [optional] The partial document to index. Required for update, index operations
+  ObjectMap doc = 6;
+
+  // [optional] When `true`, uses the contents of 'doc' as the value of 'upsert'. If a document exists, it is updated; if it does not exist, a new document is indexed with the parameters specified in the `doc` field. It's only supported for the `update` operation.
+  .google.protobuf.BoolValue doc_as_upsert = 7;
+  // [optional] Script for more complex document updates by defining the script with the `source` or `id` from a document
+  Script script = 8;
+
+  // [optional] When `true`, executes the script whether or not the document exists.
+  .google.protobuf.BoolValue scripted_upsert = 9;
+
+  // [optional] Defines how to fetch a source. Fetching can be disabled entirely, or the source can be filtered.
+  SourceConfig source = 10 [json_name = "_source"];
+
+  // NOTE(review): field numbers 11 and 12 are unassigned — confirm whether
+  // they are held for future fields or should be declared `reserved`.
+  // [optional] If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is executed. It's only supported for the `update` operation.
+  ObjectMap upsert = 13;
+  // [optional] The document to index. Required for create operation
+  ObjectMap object = 14;
+}
+
+message IndexOperation {
+  // [optional] The document ID. If no ID is specified, a document ID is automatically generated.
+  .google.protobuf.StringValue id = 1 [json_name = "_id"];
+  // [optional] Name of the data stream, index, or index alias to perform the action on.
+  // This parameter is required in `BulkRequest`.
+  .google.protobuf.StringValue index = 2 [json_name = "_index"];
+
+  // [optional] Custom value used to route operations to a specific shard.
+  string routing = 3;
+
+  // [optional] Only perform the operation if the document has this primary term.
+  .google.protobuf.Int64Value if_primary_term = 4;
+  // [optional] Only perform the operation if the document has this sequence number
+  .google.protobuf.Int64Value if_seq_no = 5;
+  // [optional] Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed.
+  .google.protobuf.Int64Value version = 6;
+
+  enum VersionType {
+
+    VERSION_TYPE_INVALID = 0;
+    // Retrieve the document if the specified version number is greater than the document's current version
+    VERSION_TYPE_EXTERNAL = 1;
+    // Retrieve the document if the specified version number is greater than or equal to the document's current version
+    VERSION_TYPE_EXTERNAL_GTE = 2;
+  }
+  // [optional] Assigns a specific type to the document.
+  VersionType version_type = 7;
+
+  // [optional] A map from the full name of fields to the name of dynamic templates. Defaults to an empty map. If a name matches a dynamic template, then that template will be applied regardless of other match predicates defined in the template. If a field is already defined in the mapping, then this parameter won't be used.
+  // proto3 `map` requires explicit key/value types; both are field-name and
+  // template-name strings here.
+  map<string, string> dynamic_templates = 8;
+
+  // [optional] ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter.
+  .google.protobuf.StringValue pipeline = 9;
+
+  // [optional] If `true`, the request's actions must target an index alias. Defaults to false.
+  .google.protobuf.BoolValue require_alias = 10;
+
+}
+
+message CreateOperation {
+  // [optional] The document ID. If no ID is specified, a document ID is automatically generated.
+  .google.protobuf.StringValue id = 1 [json_name = "_id"];
+  // [optional] Name of the data stream, index, or index alias to perform the action on. This parameter is required in `BulkRequest`.
+  .google.protobuf.StringValue index = 2 [json_name = "_index"];
+
+  // [optional] Custom value used to route operations to a specific shard.
+  string routing = 3;
+  // [optional] Only perform the operation if the document has this primary term
+  .google.protobuf.Int64Value if_primary_term = 4;
+  // [optional] Only perform the operation if the document has this sequence number
+  .google.protobuf.Int64Value if_seq_no = 5;
+  // [optional] Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed.
+  .google.protobuf.Int64Value version = 6;
+
+  enum VersionType {
+
+    VERSION_TYPE_INVALID = 0;
+    // Retrieve the document if the specified version number is greater than the document's current version
+    VERSION_TYPE_EXTERNAL = 1;
+    // Retrieve the document if the specified version number is greater than or equal to the document's current version
+    VERSION_TYPE_EXTERNAL_GTE = 2;
+  }
+  // [optional] Assigns a specific type to the document.
+  VersionType version_type = 7;
+
+  // [optional] A map from the full name of fields to the name of dynamic templates. Defaults to an empty map. If a name matches a dynamic template, then that template will be applied regardless of other match predicates defined in the template. If a field is already defined in the mapping, then this parameter won't be used.
+  // proto3 `map` requires explicit key/value types; both are field-name and
+  // template-name strings here.
+  map<string, string> dynamic_templates = 8;
+
+  // [optional] ID of the pipeline to use to preprocess incoming documents.
If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. + .google.protobuf.StringValue pipeline = 9; + + // [optional] If `true`, the request's actions must target an index alias. Defaults to false. + .google.protobuf.BoolValue require_alias = 10; + +} + +message UpdateOperation { + // [required] The document ID. + .google.protobuf.StringValue id = 1 [json_name = "_id"]; + // [optional] Name of the the data stream, index, or index alias to perform the action on. This parameter is required in `BulkRequest`. + .google.protobuf.StringValue index = 2 [json_name = "_index"]; + // [optional] Custom value used to route operations to a specific shard. + string routing = 3; + + // [optional] Only perform the operation if the document has this primary term + .google.protobuf.Int64Value if_primary_term = 4; + // [optional] Only perform the operation if the document has this sequence number + .google.protobuf.Int64Value if_seq_no = 5; + // [optional] Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. + .google.protobuf.Int64Value version = 6; + + enum VersionType { + + VERSION_TYPE_INVALID = 0; + // Retrieve the document if the specified version number is greater than the document's current version + VERSION_TYPE_EXTERNAL = 1; + // Retrieve the document if the specified version number is greater than or equal to the document's current version + VERSION_TYPE_EXTERNAL_GTE = 2; + } + // [optional] Assigns a specific type to the document. + VersionType version_type = 7; + + // [optional] If `true`, the request's actions must target an index alias. Defaults to false. 
+ .google.protobuf.BoolValue require_alias = 8; + // [optional] Specify how many times an update should be retried in the case of a version conflict. + .google.protobuf.Int32Value retry_on_conflict = 9; + +} + +message DeleteOperation { + // [required] The document ID. + .google.protobuf.StringValue id = 1 [json_name = "_id"]; + // [optional] Name of the the data stream, index, or index alias to perform the action on. This parameter is required in `BulkRequest`. + .google.protobuf.StringValue index = 2 [json_name = "_index"]; + // [optional] Custom value used to route operations to a specific shard. + string routing = 3; + + // [optional] Only perform the operation if the document has this primary term + .google.protobuf.Int64Value if_primary_term = 4; + // [optional] Only perform the operation if the document has this sequence number + .google.protobuf.Int64Value if_seq_no = 5; + // [optional] Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. + .google.protobuf.Int64Value version = 6; + + enum VersionType { + + VERSION_TYPE_INVALID = 0; + // Retrieve the document if the specified version number is greater than the document's current version + VERSION_TYPE_EXTERNAL = 1; + // Retrieve the document if the specified version number is greater than or equal to the document's current version + VERSION_TYPE_EXTERNAL_GTE = 2; + } + // [optional] Assigns a specific type to the document. + VersionType version_type = 7; + +} + +// Bulk response contains the individual results of each operation in the request, returned in the order submitted. The success or failure of an individual operation does not affect other operations in the request. 
+message BulkResponse { + oneof response { + // The bulk success response + BulkResponseBody bulk_response_body = 1; + // The bulk error response + BulkErrorResponse bulk_error_response = 2; + } +} + +// Index Bulk Response contains the individual results of each operation in the request, returned in the order submitted. The success or failure of an individual operation does not affect other operations in the request. +message IndexBulkResponse { + oneof response { + // The index bulk success response + BulkResponseBody bulk_response_body = 1; + // The index bulk error response + BulkErrorResponse bulk_error_response = 2; + } +} + +message BulkErrorResponse { + // [optional] The bulk error + OpenSearchException error = 1; + // [optional] HTTP response status code + .google.protobuf.Int32Value status = 2; +} + +message BulkResponseBody { + + // [optional] If true, one or more of the operations in the bulk request did not complete successfully. + .google.protobuf.BoolValue errors = 1; + // [optional] Contains the result of each operation in the bulk request, in the order they were submitted. + repeated Item items = 2; + // [optional] How long, in milliseconds, it took to process the bulk request. + .google.protobuf.Int64Value took = 3; + // [optional] How long, in milliseconds, it took to process documents through an ingest pipeline + .google.protobuf.Int64Value ingest_took = 4; + +} + +message Item { + oneof item { + ResponseItem create = 1; + ResponseItem delete = 2; + ResponseItem index = 3; + ResponseItem update = 4; + } +} + +message ResponseItem { + + // [optional] The document type. + .google.protobuf.StringValue type = 1 [json_name = "_type"]; + + message Id { + oneof id { + NullValue null_value = 1; + .google.protobuf.StringValue string_value = 2; + } + } + // [optional] The document ID associated with the operation. + Id id = 2 [json_name = "_id"]; + + // [optional] Name of the index associated with the operation. 
If the operation targeted a data stream, this is the backing index into which the document was written. + .google.protobuf.StringValue index = 3 [json_name = "_index"]; + + // [optional] HTTP status code returned for the operation. + .google.protobuf.Int32Value status = 4; + // [optional] Contains additional information about the failed operation. + ErrorCause error = 5; + + // [optional] The primary term assigned to the document for the operation. + .google.protobuf.Int64Value primary_term = 6 [json_name = "_primary_term"]; + + // [optional] Result of the operation. Successful values are `created`, `deleted`, and `updated`. + .google.protobuf.StringValue result = 7; + // [optional] The sequence number assigned to the document for the operation. Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version + .google.protobuf.Int64Value seq_no = 8 [json_name = "_seq_no"]; + // [optional] Contains shard information for the operation. This parameter is only returned for successful operations. + ShardStatistics shards = 9 [json_name = "_shards"]; + // [optional] The document version associated with the operation. The document version is incremented each time the document is updated. This parameter is only returned for successful actions. + .google.protobuf.Int64Value version = 10 [json_name = "_version"]; + // [optional] if `true`, it requires immediate visibility of the document + .google.protobuf.BoolValue forced_refresh = 11; + // [optional] + InlineGetDictUserDefined get = 12; + +} + + +message InlineGetDictUserDefined { + // [optional] + ObjectMap fields = 1; + // [optional] Whether the document exists. + .google.protobuf.BoolValue found = 2; + // [optional] The sequence number assigned to the document for the operation. 
Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version + .google.protobuf.Int64Value seq_no = 3 [json_name = "_seq_no"]; + // [optional] The primary term assigned to the document for the operation. + .google.protobuf.Int32Value primary_term = 4 [json_name = "_primary_term"]; + // [optional] Custom value used to route operations to a specific shard. + repeated string routing = 5 [json_name = "_routing"]; + // [optional] Contains the document's data + ObjectMap source = 6 [json_name = "_source"]; + +} + +// Index document operation to add a single document to your index +message IndexDocumentRequest { + // [required] Name of the the data stream, index, or index alias to perform the action on. + .google.protobuf.StringValue index = 1; + // [optional] Only perform the operation if the document has this primary term. + .google.protobuf.Int64Value if_primary_term = 2; + // [optional] Only perform the operation if the document has this sequence number. + .google.protobuf.Int64Value if_seq_no = 3; + + enum OpType{ + + OP_TYPE_INVALID = 0; + OP_TYPE_CREATE = 1; + OP_TYPE_INDEX = 2; + } + // [optional] Set to create to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. Same as using the `/_create` endpoint. Valid values: `index`, `create`. If document id is specified, it defaults to `index`. Otherwise, it defaults to `create`. + OpType op_type = 4; + // [optional] ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. + .google.protobuf.StringValue pipeline = 5; + enum Refresh { + + REFRESH_INVALID = 0; + // `REFRESH_FALSE` do nothing with refreshes. 
+ REFRESH_FALSE = 1; + // `REFRESH_TRUE` makes the changes show up in search results immediately, but hurts cluster performance. + REFRESH_TRUE = 2; + // `REFRESH_WAIT_FOR` waits for a refresh. Requests take longer to return, but cluster performance doesn't suffer. + REFRESH_WAIT_FOR = 3; + } + // [optional] enum of whether to refresh the affected shards after performing the indexing operations. Default is false + Refresh refresh = 6; + // [optional] If `true`, the request's actions must target an index alias. Defaults to false. + .google.protobuf.BoolValue require_alias = 7; + // [optional] Custom value used to route operations to a specific shard. + repeated string routing = 8; + // [optional] Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + // pattern: ^([0-9\.]+)(?:d|h|m|s|ms|micros|nanos)$ + // Defaults to 1m (one minute). This guarantees OpenSearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. + .google.protobuf.StringValue timeout = 9; + // [optional] Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. + .google.protobuf.Int64Value version = 10; + // Specific version type: `external`, `external_gte`. + enum VersionType { + + VERSION_TYPE_INVALID = 0; + // Retrieve the document if the specified version number is greater than the document's current version + VERSION_TYPE_EXTERNAL = 1; + // Retrieve the document if the specified version number is greater than or equal to the document's current version + VERSION_TYPE_EXTERNAL_GTE = 2; + } + // [optional] Assigns a specific type to the document. + VersionType version_type = 11; + // [optional] The number of active shards that must be available before OpenSearch processes the request. Default is 1 (only the primary shard). Set to all or a positive integer. 
Values greater than 1 require replicas. For example, if you specify a value of 3, the index must have two replicas distributed across two additional nodes for the operation to succeed. + WaitForActiveShards wait_for_active_shards = 12; + // [required] contain the information you want to index. + ObjectMap request_body = 13; +} + +// index document with specified document ID +message IndexDocumentIdRequest { + //[required] Unique identifier for the document. + .google.protobuf.StringValue id = 1; + // [required] Name of the data stream or index to target. + .google.protobuf.StringValue index = 2; + // [optional] Only perform the operation if the document has this primary term. + .google.protobuf.Int64Value if_primary_term = 3; + // [optional] Only perform the operation if the document has this sequence number. + .google.protobuf.Int64Value if_seq_no = 4; + + enum OpType { + + OP_TYPE_INVALID = 0; + // Index a document only if it doesn't exist + OP_TYPE_CREATE = 1; + // A document ID is included in the request + OP_TYPE_INDEX = 2; + } + // [optional] Set to create to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. Same as using the `/_create` endpoint. Valid values: `index`, `create`. If document id is specified, it defaults to `index`. Otherwise, it defaults to `create`. + OpType op_type = 5; + // [optional] ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. + .google.protobuf.StringValue pipeline = 6; + enum Refresh { + + REFRESH_INVALID = 0; + // `REFRESH_FALSE` do nothing with refreshes. 
+ REFRESH_FALSE = 1; + // `REFRESH_TRUE` makes the changes show up in search results immediately, but hurts cluster performance. + REFRESH_TRUE = 2; + // `REFRESH_WAIT_FOR` waits for a refresh. Requests take longer to return, but cluster performance doesn't suffer. + REFRESH_WAIT_FOR = 3; + } + // [optional] enum of whether to refresh the affected shards after performing the indexing operations. Default is false + Refresh refresh = 7; + // [optional] If `true`, the request's actions must target an index alias. Defaults to false. + .google.protobuf.BoolValue require_alias = 8; + // [optional] Custom value used to route operations to a specific shard. + repeated string routing = 9; + // [optional] Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + // Pattern: ^([0-9\.]+)(?:d|h|m|s|ms|micros|nanos)$ + // Defaults to 1m (one minute). This guarantees OpenSearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. + .google.protobuf.StringValue timeout = 10; + // [optional] Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. + .google.protobuf.Int64Value version = 11; + enum VersionType { + + VERSION_TYPE_INVALID = 0; + // Retrieve the document if the specified version number is greater than the document's current version + VERSION_TYPE_EXTERNAL = 1; + // Retrieve the document if the specified version number is greater than or equal to the document's current version + VERSION_TYPE_EXTERNAL_GTE = 2; + } + // [optional] Assigns a specific type to the document. + VersionType version_type = 12; + // [optional] The number of active shards that must be available before OpenSearch processes the request. Default is 1 (only the primary shard). Set to all or a positive integer. Values greater than 1 require replicas. 
For example, if you specify a value of 3, the index must have two replicas distributed across two additional nodes for the operation to succeed. + WaitForActiveShards wait_for_active_shards = 13; + // [required] contain the information you want to index. + ObjectMap request_body = 14 ; +} + +// The response from index document request +message IndexDocumentResponse { + oneof response { + // Index document success response + IndexDocumentResponseBody index_document_response_body = 1; + // Index document failure response + IndexDocumentErrorResponse index_document_error_response = 2; + } +} +// The error response from index document request +message IndexDocumentErrorResponse { + // [optional] + OpenSearchException error = 1; + // [optional] HTTP response status code + .google.protobuf.Int32Value status = 2; +} + +message IndexDocumentResponseBody { + // [optional] The document type. + .google.protobuf.StringValue type = 1 [json_name = "_type"]; + // [optional] The document's ID. + .google.protobuf.StringValue id = 2 [json_name = "_id"]; + // [optional] The name of the index. + .google.protobuf.StringValue index = 3 [json_name = "_index"]; + // [optional] The primary term assigned when the document was indexed. + .google.protobuf.Int64Value primary_term = 4 [json_name = "_primary_term"]; + + enum Result { + + RESULT_INVALID = 0; + RESULT_CREATED = 1; + RESULT_DELETED = 2; + RESULT_NOOP = 3; + RESULT_NOT_FOUND = 4; + RESULT_UPDATED = 5; + } + // [optional] The result of the index operation. + Result result = 5; + // [optional] The sequence number assigned when the document was indexed. + .google.protobuf.Int64Value seq_no = 6 [json_name = "_seq_no"]; + // [optional] Detailed information about the cluster's shards. + ShardStatistics shards = 7 [json_name = "_shards"]; + // [optional] The document's version. 
+ .google.protobuf.Int64Value version = 8 [json_name = "_version"]; + // [optional] if `true`, it requires immediate visibility of the document + .google.protobuf.BoolValue forced_refresh = 9; +} + + +message IndexDocumentIdResponseBody { + // [optional] The document type. + .google.protobuf.StringValue type = 1 [json_name = "_type"]; + // [optional] The document's ID. + .google.protobuf.StringValue id = 2 [json_name = "_id"]; + // [optional] The name of the index. + .google.protobuf.StringValue index = 3 [json_name = "_index"]; + // [optional] The primary term assigned when the document was indexed. + .google.protobuf.Int64Value primary_term = 4 [json_name = "_primary_term"]; + + + enum Result { + + RESULT_INVALID = 0; + RESULT_CREATED = 1; + RESULT_DELETED = 2; + RESULT_NOOP = 3; + RESULT_NOT_FOUND = 4; + RESULT_UPDATED = 5; + } + // [optional] The result of the index operation. + Result result = 5; + // [optional] The sequence number assigned when the document was indexed. + .google.protobuf.Int64Value seq_no = 6 [json_name = "_seq_no"]; + // [optional] Detailed information about the cluster's shards. + ShardStatistics shards = 7 [json_name = "_shards"]; + // [optional] The document's version. 
+ .google.protobuf.Int64Value version = 8 [json_name = "_version"]; + // [optional] if `true`, it requires immediate visibility of the document + .google.protobuf.BoolValue forced_refresh = 9; + +} + +// The response from index document Id request +message IndexDocumentIdResponse { + oneof response { + IndexDocumentIdResponseBody index_document_id_response_body = 1; + IndexDocumentIdErrorResponse index_document_id_error_response = 2; + } +} + +// The error response from index document Id request +message IndexDocumentIdErrorResponse { + // [optional] + OpenSearchException error = 1; + // [optional] HTTP response status code + .google.protobuf.Int32Value status = 2; +} + +// create document with Id to target index request +message IndexDocumentCreateIdRequest { + // [required] Unique identifier for the document. + .google.protobuf.StringValue id = 1; + // [required] Name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. + .google.protobuf.StringValue index = 2; + // [optional] ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. + .google.protobuf.StringValue pipeline = 3; + + enum Refresh { + + REFRESH_INVALID = 0; + // `REFRESH_FALSE` do nothing with refreshes. + REFRESH_FALSE = 1; + // `REFRESH_TRUE` makes the changes show up in search results immediately, but hurts cluster performance. + REFRESH_TRUE = 2; + // `REFRESH_WAIT_FOR` waits for a refresh. Requests take longer to return, but cluster performance doesn't suffer. 
+ REFRESH_WAIT_FOR = 3; + } + // [optional] enum of whether to refresh the affected shards after performing the indexing operations. Default is false + Refresh refresh = 4; + // Custom value used to route operations to a specific shard. + repeated string routing = 5; + // [optional] Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + // Pattern: ^([0-9\.]+)(?:d|h|m|s|ms|micros|nanos)$ + // Defaults to 1m (one minute). This guarantees OpenSearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. + .google.protobuf.StringValue timeout = 6; + // [optional] Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. + .google.protobuf.Int64Value version = 7; + enum VersionType { + + VERSION_TYPE_INVALID = 0; + // Retrieve the document if the specified version number is greater than the document's current version + VERSION_TYPE_EXTERNAL = 1; + // Retrieve the document if the specified version number is greater than or equal to the document's current version + VERSION_TYPE_EXTERNAL_GTE = 2; + } + // [optional] Assigns a specific type to the document. + VersionType version_type = 8; + // [optional] The number of active shards that must be available before OpenSearch processes the request. Default is 1 (only the primary shard). Set to all or a positive integer. Values greater than 1 require replicas. For example, if you specify a value of 3, the index must have two replicas distributed across two additional nodes for the operation to succeed. + WaitForActiveShards wait_for_active_shards = 9; + // [required] contain the information you want to index. + ObjectMap request_body = 10; +} + +message IndexDocumentCreateIdResponseBody { + // [optional] The document type. 
+ .google.protobuf.StringValue type = 1 [json_name = "_type"]; + // [optional] The document's ID. + .google.protobuf.StringValue id = 2 [json_name = "_id"]; + // [optional] The name of the index. + .google.protobuf.StringValue index = 3 [json_name = "_index"]; + // [optional] The primary term assigned when the document was indexed. + .google.protobuf.Int64Value primary_term = 4 [json_name = "_primary_term"]; + + + enum Result { + + RESULT_INVALID = 0; + RESULT_CREATED = 1; + RESULT_DELETED = 2; + RESULT_NOOP = 3; + RESULT_NOT_FOUND = 4; + RESULT_UPDATED = 5; + } + // [optional] The result of the index operation. + Result result = 5; + // [optional] The sequence number assigned when the document was indexed. + .google.protobuf.Int64Value seq_no = 6 [json_name = "_seq_no"]; + // [optional] Detailed information about the cluster's shards. + ShardStatistics shards = 7 [json_name = "_shards"]; + // [optional] The document's version. + .google.protobuf.Int64Value version = 8 [json_name = "_version"]; + // [optional] if `true`, it requires immediate visibility of the document + .google.protobuf.BoolValue forced_refresh = 9; + +} + +// The response from document creation with Id to target index request +message IndexDocumentCreateIdResponse { + oneof response { + IndexDocumentCreateIdResponseBody index_document_create_id_response_body = 1; + IndexDocumentCreateIdErrorResponse index_document_create_id_error_response = 2; + } +} +// The error response from document creation with Id to target index request +message IndexDocumentCreateIdErrorResponse { + // [optional] + OpenSearchException error = 1; + // [optional] HTTP response status code + .google.protobuf.Int32Value status = 2; +} + +// Delete index document with Id request +message DeleteDocumentRequest { + // [required] Unique identifier for the document. + .google.protobuf.StringValue id = 1; + // [required] Name of the target index. 
+ .google.protobuf.StringValue index = 2; + // [optional] Only perform the operation if the document has this primary term. + .google.protobuf.Int64Value if_primary_term = 3; + // [optional] Only perform the operation if the document has this sequence number. + .google.protobuf.Int64Value if_seq_no = 4; + + enum Refresh { + + REFRESH_INVALID = 0; + // `REFRESH_FALSE` do nothing with refreshes. + REFRESH_FALSE = 1; + // `REFRESH_TRUE` makes the changes show up in search results immediately, but hurts cluster performance. + REFRESH_TRUE = 2; + // `REFRESH_WAIT_FOR` waits for a refresh. Requests take longer to return, but cluster performance doesn't suffer. + REFRESH_WAIT_FOR = 3; + } + // [optional] enum of whether to refresh the affected shards after performing the indexing operations. Default is false + Refresh refresh = 5; + // Custom value used to route operations to a specific shard. + repeated string routing = 6; + // [optional] Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + // Pattern: ^([0-9\.]+)(?:d|h|m|s|ms|micros|nanos)$ + // Defaults to 1m (one minute). This guarantees OpenSearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. + .google.protobuf.StringValue timeout = 7; + // [optional] Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. + .google.protobuf.Int64Value version = 8; + enum VersionType { + + VERSION_TYPE_INVALID = 0; + // Retrieve the document if the specified version number is greater than the document's current version + VERSION_TYPE_EXTERNAL = 1; + // Retrieve the document if the specified version number is greater than or equal to the document's current version + VERSION_TYPE_EXTERNAL_GTE = 2; + } + // [optional] Assigns a specific type to the document. 
+ VersionType version_type = 9; + // [optional] The number of active shards that must be available before OpenSearch processes the request. Default is 1 (only the primary shard). Set to all or a positive integer. Values greater than 1 require replicas. For example, if you specify a value of 3, the index must have two replicas distributed across two additional nodes for the operation to succeed. + WaitForActiveShards wait_for_active_shards = 10; +} + +message DeleteDocumentResponseBody { + // [optional] The document type. + .google.protobuf.StringValue type = 1 [json_name = "_type"]; + // [optional] The document's ID. + .google.protobuf.StringValue id = 2 [json_name = "_id"]; + // [optional] The name of the index. + .google.protobuf.StringValue index = 3 [json_name = "_index"]; + // [optional] The primary term assigned when the document was indexed. + .google.protobuf.Int64Value primary_term = 4 [json_name = "_primary_term"]; + + + enum Result { + + RESULT_INVALID = 0; + RESULT_CREATED = 1; + RESULT_DELETED = 2; + RESULT_NOOP = 3; + RESULT_NOT_FOUND = 4; + RESULT_UPDATED = 5; + } + // [optional] The result of the index operation. + Result result = 5; + // [optional] The sequence number assigned when the document was indexed. + .google.protobuf.Int64Value seq_no = 6 [json_name = "_seq_no"]; + // [optional] Detailed information about the cluster's shards. + ShardStatistics shards = 7 [json_name = "_shards"]; + // [optional] The document's version. 
+ .google.protobuf.Int64Value version = 8 [json_name = "_version"];
+ // [optional] if `true`, it requires immediate visibility of the document
+ .google.protobuf.BoolValue forced_refresh = 9;
+
+}
+
+// The response from delete index document with Id request
+message DeleteDocumentResponse {
+ oneof response {
+ DeleteDocumentResponseBody delete_document_response_body = 1;
+ DeleteDocumentErrorResponse delete_document_error_response = 2;
+ }
+}
+// The error response from delete index document with Id request
+message DeleteDocumentErrorResponse {
+ // [optional]
+ OpenSearchException error = 1;
+ // [optional] HTTP response status code
+ .google.protobuf.Int32Value status = 2;
+}
+
+// Update index document with Id request
+message UpdateDocumentRequest {
+ // [optional] Unique identifier for the document.
+ .google.protobuf.StringValue id = 1;
+ // [optional] Name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index.
+ .google.protobuf.StringValue index = 2;
+ // [optional] Set to false to disable source retrieval. You can also specify a comma-separated list of the fields you want to retrieve.
+ SourceConfigParam source = 3 [json_name = "_source"];
+ // [optional] A comma-separated list of source fields to exclude from the response.
+ repeated string source_excludes = 4;
+ // [optional] A comma-separated list of source fields to include in the response.
+// repeated string source_includes = 5 [json_name = "_source_includes"];
+ // [optional] Only perform the operation if the document has this primary term.
+ .google.protobuf.Int64Value if_primary_term = 6;
+ // [optional] Only perform the operation if the document has this sequence number. 
+ .google.protobuf.Int64Value if_seq_no = 7; + // [optional] The script language. + .google.protobuf.StringValue lang = 8; + enum Refresh { + + REFRESH_INVALID = 0; + // `REFRESH_FALSE` do nothing with refreshes. + REFRESH_FALSE = 1; + // `REFRESH_TRUE` makes the changes show up in search results immediately, but hurts cluster performance. + REFRESH_TRUE = 2; + // `REFRESH_WAIT_FOR` waits for a refresh. Requests take longer to return, but cluster performance doesn't suffer. + REFRESH_WAIT_FOR = 3; + } + // [optional] enum of whether to refresh the affected shards after performing the indexing operations. Default is false + Refresh refresh = 9; + // [optional] If `true`, the request's actions must target an index alias. Defaults to false. + .google.protobuf.BoolValue require_alias = 10; + // Specify how many times should the operation be retried when a conflict occurs. + .google.protobuf.Int32Value retry_on_conflict = 11; + // Custom value used to route operations to a specific shard. + repeated string routing = 12; + // Period to wait for dynamic mapping updates and active shards. This guarantees OpenSearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. + // Pattern: ^([0-9\.]+)(?:d|h|m|s|ms|micros|nanos)$ + google.protobuf.StringValue timeout = 13; + // [optional] The number of active shards that must be available before OpenSearch processes the request. Default is 1 (only the primary shard). Set to all or a positive integer. Values greater than 1 require replicas. For example, if you specify a value of 3, the index must have two replicas distributed across two additional nodes for the operation to succeed. + WaitForActiveShards wait_for_active_shards = 14; + // [required] The update document request body + UpdateDocumentRequestBody request_body = 15; +} + +message UpdateDocumentRequestBody { + // [optional] Controls whether an operation is ignored if there is no change. Default is true. 
+ .google.protobuf.BoolValue detect_noop = 1; + + // [optional] A partial update to an existing document. + ObjectMap doc = 2; + + // [optional] Set to true to use the contents of 'doc' as the value of 'upsert' + .google.protobuf.BoolValue doc_as_upsert = 3; + // [optional] Script for more complex document updates by defining the script with the `source` or `id` from a document + Script script = 4; + + //[optional] Set to true to execute the script whether or not the document exists. + .google.protobuf.BoolValue scripted_upsert = 5; + // [optional] Defines how to fetch a source. Fetching can be disabled entirely, or the source can be filtered. + SourceConfig source = 6 [json_name = "_source"]; + + // [optional] If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is executed. + ObjectMap upsert = 7; +} + +// The response from update index document with Id request +message UpdateDocumentResponse { + oneof response { + UpdateDocumentResponseBody update_document_response_body = 1; + UpdateDocumentErrorResponse update_document_error_response = 2; + } +} +// The error response from update index document with Id request +message UpdateDocumentErrorResponse { + // [optional] + OpenSearchException error = 1; + // [optional] HTTP response status code + .google.protobuf.Int32Value status = 2; +} + +message UpdateDocumentResponseBody { + // [optional] The document type. + .google.protobuf.StringValue type = 1 [json_name = "_type"]; + // [optional] The document's ID. + .google.protobuf.StringValue id = 2 [json_name = "_id"]; + // [optional] The name of the index. + .google.protobuf.StringValue index = 3 [json_name = "_index"]; + // [optional] The primary term assigned when the document was indexed. 
+ .google.protobuf.Int64Value primary_term = 4 [json_name = "_primary_term"];
+
+ enum Result {
+
+ RESULT_INVALID = 0;
+ RESULT_CREATED = 1;
+ RESULT_DELETED = 2;
+ RESULT_NOOP = 3;
+ RESULT_NOT_FOUND = 4;
+ RESULT_UPDATED = 5;
+ }
+ // [optional] The result of the index operation.
+ Result result = 5;
+ // [optional] The sequence number assigned when the document was indexed.
+ .google.protobuf.Int64Value seq_no = 6 [json_name = "_seq_no"];
+ // [optional] Detailed information about the cluster's shards.
+ ShardStatistics shards = 7 [json_name = "_shards"];
+ // [optional] The document's version.
+ .google.protobuf.Int64Value version = 8 [json_name = "_version"];
+ // [optional] if `true`, it requires immediate visibility of the document
+ .google.protobuf.BoolValue forced_refresh = 9;
+ // [optional]
+ InlineGet get = 10;
+}
+
+// Get document request with document ID specified
+message GetDocumentRequest {
+ // [required] Unique identifier for the document.
+ .google.protobuf.StringValue id = 1;
+ // [required] Name of the index that contains the document.
+ .google.protobuf.StringValue index = 2;
+ // [optional] Set to false to disable source retrieval. You can also specify a comma-separated list of the fields you want to retrieve. Default is true.
+ SourceConfigParam source = 3 [json_name = "_source"];
+ // [optional] A comma-separated list of source fields to exclude from the response.
+ repeated string source_excludes = 4;
+ // [optional] A comma-separated list of source fields to include in the response.
+// repeated string source_includes = 5 [json_name = "_source_includes"];
+ // [optional] Specifies a preference of which shard to retrieve results from. 
Available options are _local, which tells the operation to retrieve results from a locally allocated shard replica, and a custom string value assigned to a specific shard replica. By default, OpenSearch executes get document operations on random shards. + .google.protobuf.StringValue preference = 6; + // [optional] Specifies whether the operation should run in realtime. If false, the operation waits for the index to refresh to analyze the source to retrieve data, which makes the operation near-realtime. Default is true. + .google.protobuf.BoolValue realtime = 7; + // [optional] If true, OpenSearch refreshes shards to make the get operation available to search results. Valid options are true, false, and wait_for, which tells OpenSearch to wait for a refresh before executing the operation. Default is false. + .google.protobuf.BoolValue refresh = 8; + // [optional] A value used to route the operation to a specific shard. + repeated string routing = 9; + // [optional] List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to false. + repeated string stored_fields = 10; + // [optional] Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. + .google.protobuf.Int64Value version = 11; + enum VersionType { + + VERSION_TYPE_INVALID = 0; + // Retrieve the document if the specified version number is greater than the document's current version + VERSION_TYPE_EXTERNAL = 1; + // Retrieve the document if the specified version number is greater than or equal to the document's current version + VERSION_TYPE_EXTERNAL_GTE = 2; + } + // [optional] Assigns a specific type to the document. + VersionType version_type = 12; + +} + +message GetDocumentResponseBody { + // [optional] The document type. 
+ .google.protobuf.StringValue type = 1 [json_name = "_type"]; + // [optional] The name of the index. + .google.protobuf.StringValue index = 2 [json_name = "_index"]; + // [optional] Contains the document's data that's stored in the index. Only returned if both stored_fields and found are true. + .google.protobuf.Struct fields = 3; + // [optional] Whether the document exists. + .google.protobuf.BoolValue found = 4; + // [optional] The document's ID. + .google.protobuf.StringValue id = 5 [json_name = "_id"]; + // [optional] The primary term assigned when the document is indexed. + .google.protobuf.Int64Value primary_term = 6 [json_name = "_primary_term"]; + // [optional] The shard that the document is routed to. If the document is not routed to a particular shard, this field is omitted. + .google.protobuf.StringValue routing = 7 [json_name = "_routing"]; + // [optional] The sequence number assigned when the document was indexed. + .google.protobuf.Int64Value seq_no = 8 [json_name = "_seq_no"]; + // [optional] Contains the document's data if found is true. If _source is set to false or stored_fields is set to true in the URL parameters, this field is omitted. + .google.protobuf.Struct source = 9 [json_name = "_source"]; + // [optional] The document's version number. Updated whenever the document changes. 
+ .google.protobuf.Int64Value version = 10 [json_name = "_version"];
+}
+
+// The response from the get document with document ID specified request
+message GetDocumentResponse {
+ oneof response {
+ GetDocumentResponseBody get_document_response_body = 1;
+ GetDocumentErrorResponse get_document_error_response = 2;
+ }
+}
+// The error response from the get document with document ID specified request
+message GetDocumentErrorResponse {
+ // [optional]
+ OpenSearchException error = 1;
+ // [optional] HTTP response status code
+ .google.protobuf.Int32Value status = 2;
+}
+
+// Get document source with document ID specified request
+message GetDocumentSourceRequest {
+ // [required] Unique identifier for the document.
+ .google.protobuf.StringValue id = 1;
+ // [required] Name of the index that contains the document source.
+ .google.protobuf.StringValue index = 2;
+ // [optional] Set to false to disable source retrieval. You can also specify a comma-separated list of the fields you want to retrieve.
+ SourceConfigParam source = 3 [json_name = "_source"];
+ // [optional] A comma-separated list of source fields to exclude from the response.
+ repeated string source_excludes = 4;
+ // [optional] A comma-separated list of source fields to include in the response.
+// repeated string source_includes = 5 [json_name = "_source_includes"];
+ // [optional] Specifies the node or shard the operation should be performed on. Random by default.
+ .google.protobuf.StringValue preference = 6;
+ // [optional] Specifies whether the operation should run in realtime. If false, the operation waits for the index to refresh to analyze the source to retrieve data, which makes the operation near-realtime. Default is true. 
+ .google.protobuf.BoolValue realtime = 7; + // [optional] If true, OpenSearch refreshes shards to make the get operation available to search results. Valid options are true, false, and wait_for, which tells OpenSearch to wait for a refresh before executing the operation. Default is false. + .google.protobuf.BoolValue refresh = 8; + // [optional] Custom value used to route operations to a specific shard. + repeated string routing = 9; + // [optional] Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. + .google.protobuf.Int64Value version = 10; + enum VersionType { + + VERSION_TYPE_INVALID = 0; + // Retrieve the document if the specified version number is greater than the document's current version + VERSION_TYPE_EXTERNAL = 1; + // Retrieve the document if the specified version number is greater than or equal to the document's current version + VERSION_TYPE_EXTERNAL_GTE = 2; + } + // [optional] Assigns a specific type to the document. 
+ VersionType version_type = 11; +} + +message GetDocumentSourceResponseBody { + // [optional] + .google.protobuf.Struct object = 1; +} + +// The response from get document source with document ID specified request +message GetDocumentSourceResponse { + oneof response { + GetDocumentSourceResponseBody get_document_source_response_body = 1; + GetDocumentSourceErrorResponse get_document_source_error_response = 2; + } +} + +message GetDocumentSourceErrorResponse { + // [optional] + OpenSearchException error = 1; + // [optional] HTTP response status code + .google.protobuf.Int32Value status = 2; +} diff --git a/server/src/main/proto/spec/search.proto b/server/src/main/proto/spec/search.proto new file mode 100644 index 0000000000000..700d276d0d667 --- /dev/null +++ b/server/src/main/proto/spec/search.proto @@ -0,0 +1,1465 @@ +syntax = "proto3"; + +option java_multiple_files = true; +option java_package = "opensearch.protos"; +option java_outer_classname = "SearchProto"; +option go_package = "opensearchpb"; + +import "google/protobuf/wrappers.proto"; +import "google/protobuf/descriptor.proto"; +import "google/protobuf/struct.proto"; +import "spec/common.proto"; + +// Information about the document hits returned that match the query. +message Hits { + message Total { + // The total number of hits that match the query. The accuracy of the value depends on "relation" below. + int64 value = 1; + + // How the total.value should be interpreted. + // "eq": The total hit count is equal to TotalHits.value. + // "gte" The total hit count is greater than or equal to TotalHits.value. + string relation = 2; + } + + // The total number of hits that match the query. + Total total = 1; + // The maximum score of all hits. + float max_score = 2; + // The document hits + repeated SearchHit hits = 3; +} + +// A hit in the search results representing a document which matches the query. +message SearchHit { + // The index name/id from where the hit in the search results was returned from. 
+ // For single-tenancy deployments, it will just be the one index that is hosted. + // But for some technologies like OpenSearch which supports hosting and querying + // multiple indices in one request, it can be different per document. + string index = 1; + // The ID of the document, usually a UUID. + string id = 2; + // The score of the hit. + float score = 3; + // Contains field values for the documents. These fields must be specified in the request using one or more of the following request parameters: + // + // - fields (Not supported yet) + // - docvalue_fields (Not supported yet) + // - script_fields (Not supported yet) + // - stored_fields + map fields = 4; + + // tag of named queries that the hit matches with + repeated string matched_queries = 5; +} + +message Field { + // [required] Field to return. Supports wildcards (*). + string name = 1; + enum Format { + FORMAT_INVALID = 0; + FORMAT_DATE = 1; + FORMAT_DATE_NANOS = 2; + FORMAT_GEO_POINT = 3; + FORMAT_GEO_SHAPE = 4; + } + // [optional] Format for date and geospatial fields. Other field data types do not support this parameter. + Format format = 2; +} + +// Payload is a generic message that can be used to represent a field value in a search hit. 
+message Payload { + repeated string strings = 1; + repeated string value = 2 [deprecated = true]; + repeated bytes bytes = 3; + repeated double doubles = 4; + repeated int32 ints = 5; + repeated int64 longs = 6; + repeated float floats = 7; + repeated bool booleans = 8; +} + +// Sort by the specific field +message FieldSort { + string field_name = 1; + //[optional] if sort order is not set it defaults to desc + SortOrder sort = 2; +} + +// SortOrder allows to specify the order for sorting search results +message SortOrder { + Order order = 1; + enum Order { + ORDER_INVALID = 0; + ORDER_ASC = 1; + ORDER_DESC = 2; + } +} + + +message AggregationBreakdown { + + // [required] Contains the time spent running the aggregation’s buildAggregations() method, which builds the results of this aggregation. For concurrent segment search, the build_aggregation method contains the total elapsed time across all slices (the difference between the last completed slice execution end time and the first slice execution start time). + .google.protobuf.Int64Value build_aggregation = 1; + + // [required] Contains the number of invocations of a build_aggregation. + .google.protobuf.Int64Value build_aggregation_count = 2; + + // [required] Contains the time spent running the aggregation’s getLeafCollector() method, which creates a new collector to collect the given context. For concurrent segment search, the build_leaf_collector method contains the total elapsed time across all slices (the difference between the last completed slice execution end time and the first slice execution start time). + .google.protobuf.Int64Value build_leaf_collector = 3; + + // [required] Contains the number of invocations of a build_leaf_collector. + .google.protobuf.Int64Value build_leaf_collector_count = 4; + + // [required] Contains the time spent collecting the documents into buckets. 
For concurrent segment search, the collect method contains the total elapsed time across all slices (the difference between the last completed slice execution end time and the first slice execution start time). + .google.protobuf.Int64Value collect = 5; + + // [required] Contains the number of invocations of a collect. + .google.protobuf.Int64Value collect_count = 6; + + // [required] Contains the amount of time taken to execute the preCollection() callback method during AggregationCollectorManager creation. For concurrent segment search, the initialize method contains the total elapsed time across all slices (the difference between the last completed slice execution end time and the first slice execution start time). + .google.protobuf.Int64Value initialize = 7; + + // [required] Contains the number of invocations of a initialize. + .google.protobuf.Int64Value initialize_count = 8; + + // [optional] Contains the time spent running the aggregation’s postCollection() callback method. For concurrent segment search, the post_collection method contains the total elapsed time across all slices (the difference between the last completed slice execution end time and the first slice execution start time). + .google.protobuf.Int64Value post_collection = 9; + + // [optional] Contains the number of invocations of a post_collection. + .google.protobuf.Int64Value post_collection_count = 10; + + // [required] Contains the time spent in the reduce phase. For concurrent segment search, the reduce method contains the total elapsed time across all slices (the difference between the last completed slice execution end time and the first slice execution start time). + .google.protobuf.Int64Value reduce = 11; + + // [required] Contains the number of invocations of a reduce. 
+ .google.protobuf.Int64Value reduce_count = 12; + +} + +message AggregationProfileDebug { + + .google.protobuf.Int32Value segments_with_multi_valued_ords = 1; + + .google.protobuf.StringValue collection_strategy = 2; + + .google.protobuf.Int32Value segments_with_single_valued_ords = 3; + + .google.protobuf.Int32Value total_buckets = 4; + + .google.protobuf.Int32Value built_buckets = 5; + + .google.protobuf.StringValue result_strategy = 6; + + .google.protobuf.BoolValue has_filter = 7; + + .google.protobuf.StringValue delegate = 8; + + AggregationProfileDebug delegate_debug = 9; + + .google.protobuf.Int32Value chars_fetched = 10; + + .google.protobuf.Int32Value extract_count = 11; + + .google.protobuf.Int32Value extract_ns = 12; + + .google.protobuf.Int32Value values_fetched = 13; + + .google.protobuf.Int32Value collect_analyzed_ns = 14; + + .google.protobuf.Int32Value collect_analyzed_count = 15; + + .google.protobuf.Int32Value surviving_buckets = 16; + + .google.protobuf.Int32Value ordinals_collectors_used = 17; + + .google.protobuf.Int32Value ordinals_collectors_overhead_too_high = 18; + + .google.protobuf.Int32Value string_hashing_collectors_used = 19; + + .google.protobuf.Int32Value numeric_collectors_used = 20; + + .google.protobuf.Int32Value empty_collectors_used = 21; + + repeated string deferred_aggregators = 22; + + .google.protobuf.Int32Value segments_with_doc_count_field = 23; + + .google.protobuf.Int32Value segments_with_deleted_docs = 24; + + repeated AggregationProfileDelegateDebugFilter filters = 25; + + .google.protobuf.Int32Value segments_counted = 26; + + .google.protobuf.Int32Value segments_collected = 27; + + .google.protobuf.StringValue map_reducer = 28; + +} + +message AggregationProfileDelegateDebugFilter { + + .google.protobuf.Int32Value results_from_metadata = 1; + + .google.protobuf.StringValue query = 2; + + .google.protobuf.StringValue specialized_for = 3; + + .google.protobuf.Int32Value segments_counted_in_constant_time = 4; + +} + 
+message AggregationProfile { + + AggregationBreakdown breakdown = 1; + + .google.protobuf.StringValue description = 2; + + // Time unit for nanoseconds + .google.protobuf.Int64Value time_in_nanos = 3; + + .google.protobuf.StringValue type = 4; + + AggregationProfileDebug debug = 5; + + repeated AggregationProfile children = 6; + +} + +message Collector { + + // [required] The collector name. + .google.protobuf.StringValue name = 1; + + // [required] Contains a description of the collector. + .google.protobuf.StringValue reason = 2; + + // [required] The total elapsed time for this collector, in nanoseconds. For concurrent segment search, time_in_nanos is the total amount of time across all slices (the difference between the last completed slice execution end time and the first slice execution start time). + .google.protobuf.Int64Value time_in_nanos = 3; + + // [optional] If a collector has subcollectors (children), this field contains information about the subcollectors. + repeated Collector children = 4; + +} + +message FetchProfileBreakdown { + + .google.protobuf.Int32Value load_stored_fields = 1; + + .google.protobuf.Int32Value load_stored_fields_count = 2; + + .google.protobuf.Int32Value next_reader = 3; + + .google.protobuf.Int32Value next_reader_count = 4; + + .google.protobuf.Int32Value process_count = 5; + + .google.protobuf.Int32Value process = 6; + +} + +message FetchProfileDebug { + + repeated string stored_fields = 1; + + .google.protobuf.Int32Value fast_path = 2; + +} + +message FetchProfile { + + .google.protobuf.StringValue type = 1; + + .google.protobuf.StringValue description = 2; + + // Time unit for nanoseconds + .google.protobuf.Int64Value time_in_nanos = 3; + + FetchProfileBreakdown breakdown = 4; + + FetchProfileDebug debug = 5; + + repeated FetchProfile children = 6; + +} + +message ExplanationDetail { + + // [required] Explains what type of calculation is performed + .google.protobuf.StringValue description = 1; + + // [required] Shows any 
subcalculations performed. + repeated ExplanationDetail details = 2; + + // [required] Shows the result of the calculation, + .google.protobuf.DoubleValue value = 3; + +} + +message Explanation { + + // [required] Explains what type of calculation is performed + .google.protobuf.StringValue description = 1; + // [optional] Shows any subcalculations performed. + repeated ExplanationDetail details = 2; + // [required] Shows the result of the calculation, + .google.protobuf.DoubleValue value = 3; + +} + +message Hit { + + .google.protobuf.StringValue type = 1 [json_name = "_type"]; + + // [required] Name of the index containing the returned document. + .google.protobuf.StringValue index = 2 [json_name = "_index"]; + // [required] Unique identifier for the returned document. This ID is only unique within the returned index. + .google.protobuf.StringValue id = 3 [json_name = "_id"]; + + message Score { + oneof score{ + .google.protobuf.DoubleValue double_value = 1; + .google.protobuf.StringValue string_value = 2; + NullValue null_value = 3; + } + } + + // [optional] Relevance of the returned document. + Score score = 4 [json_name = "_score"]; + + // [optional] Explanation of how the relevance score (_score) is calculated for every result. + Explanation explanation = 5 [json_name = "_explanation"]; + + // [optional] Contains field values for the documents. + .google.protobuf.Struct fields = 6; + + // [optional] An additional highlight element for each search hit that includes the highlighted fields and the highlighted fragments. + map highlight = 7; + + // [optional] An additional nested hits that caused a search hit to match in a different scope. + map inner_hits = 8; + + // [optional] List of matched query names used in the search request. + repeated string matched_queries = 9; + + // [optional] Defines from what inner nested object this inner hit came from + NestedIdentity nested = 10 [json_name = "_nested"]; + + // [optional] List of fields ignored. 
+ repeated string ignored = 11 [json_name = "_ignored"];; + + // [optional] These values are retrieved from the document’s original JSON source and are raw so will not be formatted or treated in any way, unlike the successfully indexed fields which are returned in the fields section. + map ignored_field_values = 12; + + // [optional] Shard from which this document was retrieved. + .google.protobuf.StringValue shard = 13 [json_name = "_shard"]; + + // [optional] Node from which this document was retrieved. + .google.protobuf.StringValue node = 14 [json_name = "_node"]; + + .google.protobuf.StringValue routing = 15 [json_name = "_routing"]; + + // [optional] Source document. + .google.protobuf.Struct source = 16 [json_name = "_source"]; + + // [optional] Counts the number of operations that happened on the index + .google.protobuf.Int64Value seq_no = 17 [json_name = "_seq_no"]; + + // [optional] Counts the number of shard has changed. + .google.protobuf.Int64Value primary_term = 18 [json_name = "_primary_term"]; + + // [optional] Version number of the document. + .google.protobuf.Int64Value version = 19 [json_name = "_version"]; + + // [optional] Sorted values + repeated FieldValueResponse sort = 20; + +} + +message HitsMetadata { + message Total { + oneof total{ + TotalHits total_hits = 1; + .google.protobuf.DoubleValue double_value = 2; + } + } + // [optional] Metadata about the number of matching documents. + Total total = 1; + + // [required] Array of returned document objects. + repeated Hit hits = 2; + + message MaxScore{ + oneof max_score{ + .google.protobuf.DoubleValue double_value = 1; + .google.protobuf.StringValue string_value = 2; + NullValue null_value = 3; + } + } + + // [optional] Highest returned document _score. + MaxScore max_score = 3; + +} + +message TotalHits { + + // [required] Indicates whether the number of matching documents in the value parameter is accurate or a lower bound. 
+ TotalHitsRelation relation = 1; + enum TotalHitsRelation { + + TOTAL_HITS_RELATION_INVALID = 0; + // Accurate + TOTAL_HITS_RELATION_EQ = 1; + // Lower bound + TOTAL_HITS_RELATION_GTE = 2; + } + // [required] Total number of matching documents. + .google.protobuf.Int64Value value = 2; + +} + +message InnerHitsResult { + + // [required] An additional nested hits value. + HitsMetadata hits = 1; + +} + + +message NestedIdentity { + + // [required] The name of the nested field. + .google.protobuf.StringValue field = 1; + + // [required] Indicates the position or index of the nested document. + .google.protobuf.Int32Value offset = 2; + + // [optional] Inner nested object. + NestedIdentity nested = 3 [json_name = "_nested"]; + +} + +message Profile { + + // [required] A search request can be executed against one or more shards in the index, and a search may involve one or more indexes. Thus, the profile.shards array contains profiling information for each shard that was involved in the search. + repeated ShardProfile shards = 1; + +} + +message QueryBreakdown { + + // [required] The advance method is a lower-level version of the next_doc method in Lucene. It also finds the next matching document but necessitates that the calling query perform additional tasks, such as identifying skips. Some queries, such as conjunctions (must clauses in Boolean queries), cannot use next_doc. For those queries, advance is timed. + .google.protobuf.Int64Value advance = 1; + // [required] Contains the number of invocations of the advance method. Different invocations of the same method occur because the method is called on different documents. You can determine the selectivity of a query by comparing counts in different query components. + .google.protobuf.Int64Value advance_count = 2; + // [required] A Scorer iterates over matching documents and generates a score for each document. The build_scorer field contains the amount of time spent generating the Scorer object. 
This does not include the time spent scoring the documents. The Scorer initialization time depends on the optimization and complexity of a particular query. The build_scorer parameter also includes the amount of time associated with caching, if caching is applicable and enabled for the query. + .google.protobuf.Int64Value build_scorer = 3; + // [required] Build_scorer_count contains the number of invocations of the build_scorer method. Different invocations of the same method occur because the method is called on different documents. You can determine the selectivity of a query by comparing counts in different query components. + .google.protobuf.Int64Value build_scorer_count = 4; + // [required] A Query object in Lucene is immutable. Yet, Lucene should be able to reuse Query objects in multiple IndexSearcher objects. Thus, Query objects need to keep temporary state and statistics associated with the index in which the query is executed. To achieve reuse, every Query object generates a Weight object, which keeps the temporary context (state) associated with the tuple. The create_weight field contains the amount of time spent creating the Weight object. + .google.protobuf.Int64Value create_weight = 5; + // [required] Create_weight_count contains the number of invocations of the create_weight method. Different invocations of the same method occur because the method is called on different documents. You can determine the selectivity of a query by comparing counts in different query components. + .google.protobuf.Int64Value create_weight_count = 6; + // [required] For some queries, document matching is performed in two steps. First, the document is matched approximately. Second, those documents that are approximately matched are examined through a more comprehensive process. For example, a phrase query first checks whether a document contains all terms in the phrase. Next, it verifies that the terms are in order (which is a more expensive process). 
The match field is non-zero only for those queries that use the two-step verification process. + .google.protobuf.Int64Value match = 7; + // [required] Match_count contains the number of invocations of the match method. Different invocations of the same method occur because the method is called on different documents. You can determine the selectivity of a query by comparing counts in different query components. + .google.protobuf.Int64Value match_count = 8; + // [required] Contains the amount of time required to execute the advanceShallow Lucene method. + .google.protobuf.Int64Value shallow_advance = 9; + // [required] Shallow_advance_count contains the number of invocations of the shallow_advance method. Different invocations of the same method occur because the method is called on different documents. You can determine the selectivity of a query by comparing counts in different query components. + .google.protobuf.Int64Value shallow_advance_count = 10; + // [required] The next_doc Lucene method returns the document ID of the next document that matches the query. This method is a special type of the advance method and is equivalent to advance(docId() + 1). The next_doc method is more convenient for many Lucene queries. The next_doc field contains the amount of time required to determine the next matching document, which varies depending on the query type. + .google.protobuf.Int64Value next_doc = 11; + // [required] Next_doc_count contains the number of invocations of the next_doc method. Different invocations of the same method occur because the method is called on different documents. You can determine the selectivity of a query by comparing counts in different query components. + .google.protobuf.Int64Value next_doc_count = 12; + // [required] Contains the time taken for a Scorer to score a particular document. + .google.protobuf.Int64Value score = 13; + // [required] Score_count contains the number of invocations of the score method. 
Different invocations of the same method occur because the method is called on different documents. You can determine the selectivity of a query by comparing counts in different query components. + .google.protobuf.Int64Value score_count = 14; + // [required] Contains the amount of time required to execute the getMaxScore Lucene method. + .google.protobuf.Int64Value compute_max_score = 15; + // [required] Compute_max_score_count contains the number of invocations of the compute_max_score method. Different invocations of the same method occur because the method is called on different documents. You can determine the selectivity of a query by comparing counts in different query components. + .google.protobuf.Int64Value compute_max_score_count = 16; + // [required] Contains the amount of time required to execute the setMinCompetitiveScore Lucene method. + .google.protobuf.Int64Value set_min_competitive_score = 17; + // [required] Set_min_competitive_score_count contains the number of invocations of the set_min_competitive_score method. Different invocations of the same method occur because the method is called on different documents. You can determine the selectivity of a query by comparing counts in different query components. + .google.protobuf.Int64Value set_min_competitive_score_count = 18; + +} + + + +message SearchRequestBody { + // [optional] In the optional aggs parameter, you can define any number of aggregations. Each aggregation is defined by its name and one of the types of aggregations that OpenSearch supports. + //map aggregations = 1; + + // [optional] The collapse parameter groups search results by a particular field value. This returns only the top document within each group, which helps reduce redundancy by eliminating duplicates. + FieldCollapse collapse = 2; + + // [optional] Whether to return details about how OpenSearch computed the document's score. Default is false. 
+ .google.protobuf.BoolValue explain = 3; + + // [optional] ext object is to contain plugin-specific response fields. For example, in conversational search, the result of Retrieval Augmented Generation (RAG) is a single “hit” (answer). Plugin authors can include this answer in the search response as part of the ext object so that it is separate from the search hits. + ObjectMap ext = 4; + + // [optional] The starting index to search from. Default is 0. + .google.protobuf.Int32Value from = 5; + + // [optional] Highlighting emphasizes the search term(s) in the results so you can emphasize the query matches. + Highlight highlight = 6; + + // [optional] Whether to return how many documents matched the query. + TrackHits track_total_hits = 7; + + // [optional] Values used to boost the score of specified indexes. Specify in the format of : + repeated NumberMap indices_boost = 8; + + // [optional] The fields that OpenSearch should return using their docvalue forms. Specify a format to return results in a certain format, such as date and time. + repeated FieldAndFormat docvalue_fields = 9; + + RankContainer rank = 10; + + // [optional] Specify a score threshold to return only documents above the threshold. + .google.protobuf.FloatValue min_score = 11; + + // [optional] Use post_filter to refine search hits based on user selections while preserving all aggregation options. + QueryContainer post_filter = 12; + + // [optional] Profile provides timing information about the execution of individual components of a search request. Using the Profile API, you can debug slow requests and understand how to improve their performance. + .google.protobuf.BoolValue profile = 13; + + // [optional] The DSL query to use in the request. + QueryContainer query = 14; + + // [optional] Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. 
+ repeated Rescore rescore = 15; + + // [optional] The script_fields parameter allows you to include custom fields whose values are computed using scripts in your search results. This can be useful for calculating values dynamically based on the document data. You can also retrieve derived fields by using a similar approach. + map script_fields = 16; + + // [optional] The search_after parameter provides a live cursor that uses the previous page's results to obtain the next page's results. It is similar to the scroll operation in that it is meant to scroll many queries in parallel. You can use search_after only when sorting is applied. + repeated FieldValue search_after = 17; + + // [optional] The number of results to return. Default is 10. + .google.protobuf.Int32Value size = 18; + + // [optional] You can use the scroll operation to retrieve a large number of results. For example, for machine learning jobs, you can request an unlimited number of results in batches. + SlicedScroll slice = 19; + + // [optional] Sorting allows your users to sort results in a way that's most meaningful to them. By default, full-text queries sort results by the relevance score. You can choose to sort the results by any field value in either ascending or descending order by setting the order parameter to asc or desc. + repeated SortCombinations sort = 20; + + // [optional] Whether to include the _source field in the response. + SourceConfig source = 21 [json_name = "_source"]; + + // [optional] The fields to search for in the request. Specify a format to return results in a certain format, such as date and time. + repeated FieldAndFormat fields = 22; + + // TODO: need to support suggest. + // [optional] The suggest feature suggests similar looking terms based on a provided text by using a suggester. The suggest request part is defined alongside the query part in a _search request. If the query part is left out, only suggestions are returned. 
+ Suggester suggest = 23; + + // [optional] The maximum number of documents OpenSearch should process before terminating the request. If a query reaches this limit, OpenSearch terminates the query early. OpenSearch collects documents before sorting. Use with caution. OpenSearch applies this parameter to each shard handling the request. When possible, let OpenSearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. Default is 0. + .google.protobuf.Int32Value terminate_after = 24; + + // [optional] The period of time to wait for a response. Default is no timeout. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. + .google.protobuf.StringValue timeout = 25; + + // [optional] Whether to return document scores. Default is false. + .google.protobuf.BoolValue track_scores = 26; + + // [optional] Whether to include the document version in the response. + .google.protobuf.BoolValue version = 27; + + // [optional] Whether to return sequence number and primary term of the last operation of each document hit. + .google.protobuf.BoolValue seq_no_primary_term = 28; + + // [optional] A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this option is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. + repeated string stored_fields = 29; + + // [optional] Point in Time (PIT) lets you run different queries against a dataset that is fixed in time. + PointInTimeReference pit = 30; + + // [optional] Value to associate with the request for additional logging. 
+ repeated string stats = 31; + +} + +message TrackHits { + oneof track_hits{ + .google.protobuf.BoolValue bool_value = 1; + .google.protobuf.Int32Value int32_value = 2; + } +} + +message ClusterDetails { + + enum ClusterSearchStatus { + + CLUSTER_SEARCH_STATUS_INVALID = 0; + // The search failed on a cluster marked with skip_unavailable=false + CLUSTER_SEARCH_STATUS_FAILED = 1; + // Searches on at least one shard of the cluster were successful and at least one failed + CLUSTER_SEARCH_STATUS_PARTIAL = 2; + // The search is still running on at least one shard of the cluster + CLUSTER_SEARCH_STATUS_RUNNING = 3; + // The search failed on a cluster marked with skip_unavailable=true + CLUSTER_SEARCH_STATUS_SKIPPED = 4; + // Searches on all shards were successful + CLUSTER_SEARCH_STATUS_SUCCESSFUL = 5; + } + // [required] All possible cluster search states. + ClusterSearchStatus status = 1; + + // [required] The index expression supplied by the user. If you provide a wildcard such as logs-*, this section will show the value with the wildcard, not the concrete indices being searched. + .google.protobuf.StringValue indices = 2; + + // [optional] How long (in milliseconds) the sub-search took on that cluster. + .google.protobuf.Int64Value took = 3; + + // [required] If true, the request timed out before completion; returned results may be partial or empty. + .google.protobuf.BoolValue timed_out = 4; + + // [optional] The shard details for the sub-search on that cluster. + ShardStatistics shards = 5 [json_name = "_shards"]; + + // [optional] An array of any shard-specific failures that occurred during the search operation + repeated ShardFailure failures = 6; + +} + +message ClusterStatistics { + + // [required] Number of shards that skipped the request because a lightweight check helped realize that no documents could possibly match on this shard. This typically happens when a search request includes a range filter and the shard only has values that fall outside of that range.
+ .google.protobuf.Int32Value skipped = 1; + + // [required] Number of shards that executed the request successfully. + .google.protobuf.Int32Value successful = 2; + + // [required] Total number of shards that require querying, including unallocated shards. + .google.protobuf.Int32Value total = 3; + + // [required] Number of shards currently executing the search operation + .google.protobuf.Int32Value running = 4; + + // [required] The number of shards that returned partial results. + .google.protobuf.Int32Value partial = 5; + + // [required] Number of shards that failed to execute the request. Note that shards that are not allocated will be considered neither successful nor failed. Having failed+successful less than total is thus an indication that some of the shards were not allocated. + .google.protobuf.Int32Value failed = 6; + + // [optional] Shows metadata about the search on each cluster. + map details = 7; + +} + +message PhaseTook { + + // [required] Time taken in dfs_pre_query phase. + .google.protobuf.Int64Value dfs_pre_query = 1; + // [required] Time taken in query phase. + .google.protobuf.Int64Value query = 2; + // [required] Time taken in fetch phase. + .google.protobuf.Int64Value fetch = 3; + // [required] Time taken in dfs_query phase. + .google.protobuf.Int64Value dfs_query = 4; + // [required] Time taken in expand phase. + .google.protobuf.Int64Value expand = 5; + // [required] Time taken in can_match phase. + .google.protobuf.Int64Value can_match = 6; + +} + +// The response from search request. +message SearchResponse { + oneof response { + // The search success response + ResponseBody response_body = 1; + // The search 4xx error response + Error4xxResponseBody error_4xx_response = 2; + // The search 5xx error response + Error5xxResponse error_5xx_response = 3; + } +} + +// The response from index search request. +message IndexSearchResponse { + oneof response { + // [optional] The index search success response. 
+ ResponseBody response_body = 1; + // [optional] The index search 4xx error response. + Error4xxResponseBody error_4xx_response = 2; + // [optional] The index search 5xx error response. + Error5xxResponse error_5xx_response = 3; + } +} + +// The 4xx error response from search/index-search request. +message Error4xxResponseBody { + // [optional] The search 4xx error response body. + OpenSearchException error = 1; + // [optional] The search 4xx error response status code. + .google.protobuf.Int32Value status = 2; +} + +// The 5xx error response from search/index-search request. +message Error5xxResponse { + // [optional] The search 5xx error response status code. + .google.protobuf.Int32Value status_code = 1; + // [optional] The search 5xx error content. + .google.protobuf.StringValue error = 2; + // [optional] The search 5xx error message. + .google.protobuf.StringValue message = 3; + // [optional] The search 5xx error additional_details. + .google.protobuf.Struct additional_details = 4; +} + +// The response body from a search/index-search request. +message ResponseBody { + + // [required] Milliseconds it took OpenSearch to execute the request. + .google.protobuf.Int64Value took = 1; + + // [required] If true, the request timed out before completion; returned results may be partial or empty. + .google.protobuf.BoolValue timed_out = 2; + + // [required] Contains a count of shards used for the request. + ShardStatistics shards = 3 [json_name = "_shards"]; + + // [optional] Phase-level took time values in the response. + PhaseTook phase_took = 4; + + // [required] Contains returned documents and metadata. + HitsMetadata hits = 5; + + // [optional] When you search one or more remote clusters, a `_clusters` section is included to provide information about the search on each cluster.
+ ClusterStatistics clusters = 6 [json_name = "_clusters"]; + + // [optional] Retrieved specific fields in the search response + .google.protobuf.Struct fields = 7; + + // [optional] Highest returned document _score. + .google.protobuf.FloatValue max_score = 8; + + // [optional] The number of times that the coordinating node aggregates results from batches of shard responses + .google.protobuf.Int32Value num_reduce_phases = 9; + + // [optional] Contains profiling information. + Profile profile = 10; + + // [optional] The PIT ID. + .google.protobuf.StringValue pit_id = 11; + + // [optional] Identifier for the search and its search context. + .google.protobuf.StringValue scroll_id = 12 [json_name = "_scroll_id"]; + + // TODO: Need support suggest response. + // [optional] Provides search suggestions generated based on user input. + map suggest = 13; + + // [optional] If the query was terminated early, the terminated_early flag will be set to true in the response + .google.protobuf.BoolValue terminated_early = 14; + +} + +message QueryProfile { + + // [required] Contains timing statistics about low-level Lucene execution. + QueryBreakdown breakdown = 1; + + // [required] Contains a Lucene explanation of the query. Helps differentiate queries with the same type. + .google.protobuf.StringValue description = 2; + + // [required] The total elapsed time for this query, in nanoseconds. For concurrent segment search, time_in_nanos is the total time spent across all the slices (the difference between the last completed slice execution end time and the first slice execution start time). + .google.protobuf.Int64Value time_in_nanos = 3; + + // [required] The Lucene query type into which the search query was rewritten. Corresponds to the Lucene class name (which often has the same name in OpenSearch). + .google.protobuf.StringValue type = 4; + + // [optional] If a query has subqueries (children), this field contains information about the subqueries. 
+ repeated QueryProfile children = 5; + +} + +message SearchProfile { + + // [required] Profiling information about the Lucene collectors that ran the search. + repeated Collector collector = 1; + + // [required] Profiling information about the query execution. + repeated QueryProfile query = 2; + + // [required] All Lucene queries are rewritten. A query and its children may be rewritten more than once, until the query stops changing. The rewriting process involves performing optimizations, such as removing redundant clauses or replacing a query path with a more efficient one. After the rewriting process, the original query may change significantly. The rewrite_time field contains the cumulative total rewrite time for the query and all its children, in nanoseconds. + .google.protobuf.Int64Value rewrite_time = 3; + +} + +message NumberMap { + map number_map = 1; +} + +message PointInTimeReference { + // [required] ID for the PIT to search. If you provide a pit object, this parameter is required. + .google.protobuf.StringValue id = 1; + + // [optional] Period of time used to extend the life of the PIT. Units can be `nanos`, `micros`, `ms` (milliseconds), `s` (seconds), `m` (minutes), `h` (hours) and `d` (days). Also accepts \"0\" without a unit and \"-1\" to indicate an unspecified value. + .google.protobuf.StringValue keep_alive = 2; + +} + +// The Search API operation to perform a search across all indices in the cluster. +message SearchRequest { + // [optional] Whether to include the _source field in the response. + SourceConfigParam source = 1 [json_name = "_source"]; + // [optional] A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `source_includes` query parameter. If the `source` parameter is `false`, this parameter is ignored. + repeated string source_excludes = 2; + // [optional] A list of source fields to include in the response. 
If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `source_excludes` query parameter. If the `source` parameter is `false`, this parameter is ignored. + repeated string source_includes = 3 ; + // [optional] Whether to ignore wildcards that don't match any indexes. Default is true. + .google.protobuf.BoolValue allow_no_indices = 4; + // [optional] Whether to return partial results if the request runs into an error or times out. Default is true. + .google.protobuf.BoolValue allow_partial_search_results = 5; + // [optional] Whether the update operation should include wildcard and prefix queries in the analysis. Default is false. + .google.protobuf.BoolValue analyze_wildcard = 6; + // [optional] Analyzer to use for the query string. This parameter can only be used when the q query .google.protobuf.StringValue parameter is specified. + .google.protobuf.StringValue analyzer = 7; + // [optional] How many shard results to reduce on a node. Default is 512. + .google.protobuf.Int32Value batched_reduce_size = 8; + // [optional] The time after which the search request will be canceled. Request-level parameter takes precedence over cancel_after_time_interval cluster setting. Default is -1. + .google.protobuf.StringValue cancel_after_time_interval = 9; + // [optional] Whether to minimize round-trips between a node and remote clusters. Default is true. + .google.protobuf.BoolValue ccs_minimize_roundtrips = 10; + // [optional] Indicates whether the default operator for a string query should be AND or OR. Default is OR. + Operator default_operator = 11; + enum Operator { + + OPERATOR_INVALID = 0; + // All terms need to match. The string `to be` is interpreted as `to AND be` + OPERATOR_AND = 1; + // Only one term needs to match. The string `to be` is interpreted as `to OR be` + OPERATOR_OR = 2; + } + // [optional] The default field in case a field prefix is not provided in the query string. 
+ .google.protobuf.StringValue df = 12; + // [optional] The fields that OpenSearch should return using their docvalue forms. + repeated string docvalue_fields = 13; + // [optional] Specifies the type of index that wildcard expressions can match. Supports list of values. Default is open. + repeated ExpandWildcard expand_wildcards = 14; + enum ExpandWildcard { + + EXPAND_WILDCARD_INVALID = 0; + // Match any index + EXPAND_WILDCARD_ALL = 1; + // Match closed, non-hidden indexes + EXPAND_WILDCARD_CLOSED = 2; + // Match hidden indexes + EXPAND_WILDCARD_HIDDEN = 3; + // Deny wildcard expressions + EXPAND_WILDCARD_NONE = 4; + // Match open, non-hidden indexes + EXPAND_WILDCARD_OPEN = 5; + } + // [optional] Whether to return details about how OpenSearch computed the document's score. Default is false. + .google.protobuf.BoolValue explain = 15; + // [optional] The starting index to search from. Default is 0. + .google.protobuf.Int32Value from = 16; + // [optional] Whether to ignore concrete, expanded, or indexes with aliases if indexes are frozen. Default is true. + .google.protobuf.BoolValue ignore_throttled = 17; + // [optional] Specifies whether to include missing or closed indexes in the response and ignores unavailable shards during the search request. Default is false. + .google.protobuf.BoolValue ignore_unavailable = 18; + // [optional] Whether to return scores with named queries. Default is false. + .google.protobuf.BoolValue include_named_queries_score = 19; + // [optional] Specifies whether OpenSearch should accept requests if queries have format errors (for example, querying a text field for an integer). Default is false. + .google.protobuf.BoolValue lenient = 20; + // [optional] Numbers of concurrent shard requests this request should execute on each node. Default is 5. + .google.protobuf.Int32Value max_concurrent_shard_requests = 21; + // [optional] Whether to return phase-level took time values in the response. Default is false. 
+ .google.protobuf.BoolValue phase_took = 22; + // [optional] A prefilter size threshold that triggers a prefilter operation if the request exceeds the threshold. Default is 128 shards. + .google.protobuf.Int32Value pre_filter_shard_size = 23; + // [optional] Specifies the shards or nodes on which OpenSearch should perform the search. + .google.protobuf.StringValue preference = 24; + // [optional] Query in the Lucene query string syntax using query parameter search. + .google.protobuf.StringValue q = 25; + // [optional] Specifies whether OpenSearch should use the request cache. Default is whether it's enabled in the index's settings. + .google.protobuf.BoolValue request_cache = 26; + // [optional] Indicates whether to return hits.total as an integer. Returns an object otherwise. Default is false. + .google.protobuf.BoolValue rest_total_hits_as_int = 27; + // [optional] Value used to route the update by query operation to a specific shard. + repeated string routing = 28; + // [optional] Period to keep the search context open. + .google.protobuf.StringValue scroll = 29; + // [optional] Customizable sequence of processing stages applied to search queries. + .google.protobuf.StringValue search_pipeline = 30; + // [optional] Whether OpenSearch should use global term and document frequencies when calculating relevance scores. Default is SEARCH_TYPE_QUERY_THEN_FETCH. + SearchType search_type = 31; + enum SearchType { + + SEARCH_TYPE_INVALID = 0; + // Scores documents using global term and document frequencies across all shards. It's usually slower but more accurate. + SEARCH_TYPE_DFS_QUERY_THEN_FETCH = 1; + // Scores documents using local term and document frequencies for the shard. It's usually faster but less accurate. + SEARCH_TYPE_QUERY_THEN_FETCH = 2; + } + // [optional] Whether to return sequence number and primary term of the last operation of each document hit. 
+ .google.protobuf.BoolValue seq_no_primary_term = 32; + // [optional] Number of results to include in the response. + .google.protobuf.Int32Value size = 33; + // [optional] A list of : pairs to sort by. + repeated string sort = 34; + // [optional] Value to associate with the request for additional logging. + repeated string stats = 35; + // [optional] Whether the get operation should retrieve fields stored in the index. Default is false. + repeated string stored_fields = 36; + // [optional] Fields OpenSearch can use to look for similar terms. + .google.protobuf.StringValue suggest_field = 37; + // [optional] The mode to use when searching. This parameter can only be used when the `suggest_field` and `suggest_text` query .google.protobuf.StringValue parameters are specified. + SuggestMode suggest_mode = 38; + enum SuggestMode { + + SUGGEST_MODE_INVALID = 0; + // Use suggestions based on the provided terms + SUGGEST_MODE_ALWAYS = 1; + // Use suggestions for terms not in the index + SUGGEST_MODE_MISSING = 2; + // Use suggestions that have more occurrences + SUGGEST_MODE_POPULAR = 3; + } + // [optional] Number of suggestions to return. + .google.protobuf.Int32Value suggest_size = 39; + // [optional] The source that suggestions should be based off of. + .google.protobuf.StringValue suggest_text = 40; + // [optional] The maximum number of documents OpenSearch should process before terminating the request. Default is 0. + .google.protobuf.Int32Value terminate_after = 41; + // [optional] Period of time to wait for a response from active shards. Default is 1m. + .google.protobuf.StringValue timeout = 42; + // [optional] Whether to return document scores. Default is false. + .google.protobuf.BoolValue track_scores = 43; + // [optional] Whether to return how many documents matched the query. + TrackHits track_total_hits = 44; + // [optional] Whether returned aggregations and suggested terms should include their types in the response. Default is true. 
+ .google.protobuf.BoolValue typed_keys = 45; + // [optional] Whether to include the document version as a match. Default is false + .google.protobuf.BoolValue version = 46; + // [optional] Search Request body + SearchRequestBody request_body = 47; + +} + +// The Search API operation to perform a search within a specific index (or indices). +message IndexSearchRequest { + // [required] A list of indices to search for documents. Allowing targeted searches within one or more specified indices. + repeated string index = 1; + // [optional] Whether to include the _source field in the response. + SourceConfigParam source = 2 [json_name = "_source"]; + // [optional] A list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `source_includes` query parameter. If the `source` parameter is `false`, this parameter is ignored. + repeated string source_excludes = 3; + // [optional] A list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `source_excludes` query parameter. If the `source` parameter is `false`, this parameter is ignored. + repeated string source_includes = 4 ; + // [optional] Whether to ignore wildcards that don't match any indexes. Default is true. + .google.protobuf.BoolValue allow_no_indices = 5; + // [optional] Whether to return partial results if the request runs into an error or times out. Default is true. + .google.protobuf.BoolValue allow_partial_search_results = 6; + // [optional] Whether the update operation should include wildcard and prefix queries in the analysis. Default is false. + .google.protobuf.BoolValue analyze_wildcard = 7; + // [optional] Analyzer to use for the query string. This parameter can only be used when the q query .google.protobuf.StringValue parameter is specified. 
+ .google.protobuf.StringValue analyzer = 8; + // [optional] How many shard results to reduce on a node. Default is 512. + .google.protobuf.Int32Value batched_reduce_size = 9; + // [optional] The time after which the search request will be canceled. Request-level parameter takes precedence over cancel_after_time_interval cluster setting. Default is -1. + .google.protobuf.StringValue cancel_after_time_interval = 10; + // [optional] Whether to minimize round-trips between a node and remote clusters. Default is true. + .google.protobuf.BoolValue ccs_minimize_roundtrips = 11; + // [optional] Indicates whether the default operator for a string query should be AND or OR. Default is OR. + Operator default_operator = 12; + enum Operator { + + OPERATOR_INVALID = 0; + // All terms need to match. The string `to be` is interpreted as `to AND be` + OPERATOR_AND = 1; + // Only one term needs to match. The string `to be` is interpreted as `to OR be` + OPERATOR_OR = 2; + } + // [optional] The default field in case a field prefix is not provided in the query string. + .google.protobuf.StringValue df = 13; + // [optional] The fields that OpenSearch should return using their docvalue forms. + repeated string docvalue_fields = 14; + // [optional] Specifies the type of index that wildcard expressions can match. Supports list of values. Default is open. + repeated ExpandWildcard expand_wildcards = 15; + enum ExpandWildcard { + + EXPAND_WILDCARD_INVALID = 0; + // Match any index + EXPAND_WILDCARD_ALL = 1; + // Match closed, non-hidden indexes + EXPAND_WILDCARD_CLOSED = 2; + // Match hidden indexes + EXPAND_WILDCARD_HIDDEN = 3; + // Deny wildcard expressions + EXPAND_WILDCARD_NONE = 4; + // Match open, non-hidden indexes + EXPAND_WILDCARD_OPEN = 5; + } + // [optional] Whether to return details about how OpenSearch computed the document's score. Default is false. + .google.protobuf.BoolValue explain = 16; + // [optional] The starting index to search from. Default is 0. 
+ .google.protobuf.Int32Value from = 17; + // [optional] Whether to ignore concrete, expanded, or indexes with aliases if indexes are frozen. Default is true. + .google.protobuf.BoolValue ignore_throttled = 18; + // [optional] Specifies whether to include missing or closed indexes in the response and ignores unavailable shards during the search request. Default is false. + .google.protobuf.BoolValue ignore_unavailable = 19; + // [optional] Whether to return scores with named queries. Default is false. + .google.protobuf.BoolValue include_named_queries_score = 20; + // [optional] Specifies whether OpenSearch should accept requests if queries have format errors (for example, querying a text field for an integer). Default is false. + .google.protobuf.BoolValue lenient = 21; + // [optional] Numbers of concurrent shard requests this request should execute on each node. Default is 5. + .google.protobuf.Int32Value max_concurrent_shard_requests = 22; + // [optional] Whether to return phase-level took time values in the response. Default is false. + .google.protobuf.BoolValue phase_took = 23; + // [optional] A prefilter size threshold that triggers a prefilter operation if the request exceeds the threshold. Default is 128 shards. + .google.protobuf.Int32Value pre_filter_shard_size = 24; + // [optional] Specifies the shards or nodes on which OpenSearch should perform the search. For valid values see "https://opensearch.org/docs/latest/api-reference/search/#the-preference-query-parameter" + .google.protobuf.StringValue preference = 25; + // [optional] Query in the Lucene query string syntax using query parameter search. + .google.protobuf.StringValue q = 26; + // [optional] Specifies whether OpenSearch should use the request cache. Default is whether it's enabled in the index's settings. + .google.protobuf.BoolValue request_cache = 27; + // [optional] Indicates whether to return hits.total as an integer. Returns an object otherwise. Default is false. 
+ .google.protobuf.BoolValue rest_total_hits_as_int = 28; + // [optional] Value used to route the update by query operation to a specific shard. + repeated string routing = 29; + // [optional] Period to keep the search context open. + .google.protobuf.StringValue scroll = 30; + // [optional] Customizable sequence of processing stages applied to search queries. + .google.protobuf.StringValue search_pipeline = 31; + // [optional] Whether OpenSearch should use global term and document frequencies when calculating relevance scores. Default is SEARCH_TYPE_QUERY_THEN_FETCH. + SearchType search_type = 32; + enum SearchType { + + SEARCH_TYPE_INVALID = 0; + // Scores documents using global term and document frequencies across all shards. It's usually slower but more accurate. + SEARCH_TYPE_DFS_QUERY_THEN_FETCH = 1; + // Scores documents using local term and document frequencies for the shard. It's usually faster but less accurate. + SEARCH_TYPE_QUERY_THEN_FETCH = 2; + } + // [optional] Whether to return sequence number and primary term of the last operation of each document hit. + .google.protobuf.BoolValue seq_no_primary_term = 33; + // [optional] Number of results to include in the response. + .google.protobuf.Int32Value size = 34; + // [optional] A list of : pairs to sort by. + repeated string sort = 35; + // [optional] Value to associate with the request for additional logging. + repeated string stats = 36; + // [optional] Whether the get operation should retrieve fields stored in the index. Default is false. + repeated string stored_fields = 37; + // [optional] Fields OpenSearch can use to look for similar terms. + .google.protobuf.StringValue suggest_field = 38; + // [optional] The mode to use when searching. This parameter can only be used when the `suggest_field` and `suggest_text` query .google.protobuf.StringValue parameters are specified. 
+ SuggestMode suggest_mode = 39; + enum SuggestMode { + + SUGGEST_MODE_INVALID = 0; + // Use suggestions based on the provided terms + SUGGEST_MODE_ALWAYS = 1; + // Use suggestions for terms not in the index + SUGGEST_MODE_MISSING = 2; + // Use suggestions that have more occurrences + SUGGEST_MODE_POPULAR = 3; + } + // [optional] Number of suggestions to return. + .google.protobuf.Int32Value suggest_size = 40; + // [optional] The source that suggestions should be based off of. + .google.protobuf.StringValue suggest_text = 41; + // [optional] The maximum number of documents OpenSearch should process before terminating the request. Default is 0. + .google.protobuf.Int32Value terminate_after = 42; + // [optional] Period of time to wait for a response from active shards. Default is 1m. + .google.protobuf.StringValue timeout = 43; + // [optional] Whether to return document scores. Default is false. + .google.protobuf.BoolValue track_scores = 44; + // [optional] Whether to return how many documents matched the query. + TrackHits track_total_hits = 45; + // [optional] Whether returned aggregations and suggested terms should include their types in the response. Default is true. + .google.protobuf.BoolValue typed_keys = 46; + // [optional] Whether to include the document version as a match. Default is false + .google.protobuf.BoolValue version = 47; + // [optional] Search Request body + IndexSearchRequestBody request_body = 48; + +} + +message IndexSearchRequestBody { + // [optional] In the optional aggs parameter, you can define any number of aggregations. Each aggregation is defined by its name and one of the types of aggregations that OpenSearch supports. + //map aggregations = 1; + + // [optional] The collapse parameter groups search results by a particular field value. This returns only the top document within each group, which helps reduce redundancy by eliminating duplicates. 
+ FieldCollapse collapse = 2; + + // [optional] Whether to return details about how OpenSearch computed the document's score. Default is false. + .google.protobuf.BoolValue explain = 3; + + // [optional] ext object is to contain plugin-specific response fields. For example, in conversational search, the result of Retrieval Augmented Generation (RAG) is a single “hit” (answer). Plugin authors can include this answer in the search response as part of the ext object so that it is separate from the search hits. + ObjectMap ext = 4; + + // [optional] The starting index to search from. Default is 0. + .google.protobuf.Int32Value from = 5; + + // [optional] Highlighting emphasizes the search term(s) in the results so you can emphasize the query matches. + Highlight highlight = 6; + + // [optional] Whether to return how many documents matched the query. + TrackHits track_total_hits = 7; + + // [optional] Values used to boost the score of specified indexes. Specify in the format of : + repeated NumberMap indices_boost = 8; + + // [optional] The fields that OpenSearch should return using their docvalue forms. Specify a format to return results in a certain format, such as date and time. + repeated FieldAndFormat docvalue_fields = 9; + + RankContainer rank = 10; + + // [optional] Specify a score threshold to return only documents above the threshold. + .google.protobuf.FloatValue min_score = 11; + + // [optional] Use post_filter to refine search hits based on user selections while preserving all aggregation options. + QueryContainer post_filter = 12; + + // [optional] Profile provides timing information about the execution of individual components of a search request. Using the Profile API, you can debug slow requests and understand how to improve their performance. + .google.protobuf.BoolValue profile = 13; + + // [optional] The DSL query to use in the request. 
+ QueryContainer query = 14; + + // [optional] Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. + repeated Rescore rescore = 15; + + // [optional] The script_fields parameter allows you to include custom fields whose values are computed using scripts in your search results. This can be useful for calculating values dynamically based on the document data. You can also retrieve derived fields by using a similar approach. + map script_fields = 16; + + // [optional] The search_after parameter provides a live cursor that uses the previous page's results to obtain the next page's results. It is similar to the scroll operation in that it is meant to scroll many queries in parallel. You can use search_after only when sorting is applied. + repeated FieldValue search_after = 17; + + // [optional] The number of results to return. Default is 10. + .google.protobuf.Int32Value size = 18; + + // [optional] You can use the scroll operation to retrieve a large number of results. For example, for machine learning jobs, you can request an unlimited number of results in batches. + SlicedScroll slice = 19; + + // [optional] Sorting allows your users to sort results in a way that's most meaningful to them. By default, full-text queries sort results by the relevance score. You can choose to sort the results by any field value in either ascending or descending order by setting the order parameter to asc or desc. + repeated SortCombinations sort = 20; + + // [optional] Whether to include the _source field in the response. + SourceConfig source = 21 [json_name = "_source"]; + + // [optional] The fields to search for in the request. Specify a format to return results in a certain format, such as date and time. + repeated FieldAndFormat fields = 22; + + // [optional] The suggest feature suggests similar looking terms based on a provided text by using a suggester. 
The suggest request part is defined alongside the query part in a _search request. If the query part is left out, only suggestions are returned. + Suggester suggest = 23; + + // [optional] The maximum number of documents OpenSearch should process before terminating the request. If a query reaches this limit, OpenSearch terminates the query early. OpenSearch collects documents before sorting. Use with caution. OpenSearch applies this parameter to each shard handling the request. When possible, let OpenSearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. Default is 0. + .google.protobuf.Int32Value terminate_after = 24; + + // [optional] The period of time to wait for a response. Default is no timeout. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. + .google.protobuf.StringValue timeout = 25; + + // [optional] Whether to return document scores. Default is false. + .google.protobuf.BoolValue track_scores = 26; + + // [optional] Whether to include the document version in the response. + .google.protobuf.BoolValue version = 27; + + // [optional] Whether to return sequence number and primary term of the last operation of each document hit. + .google.protobuf.BoolValue seq_no_primary_term = 28; + + // [optional] A list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this option is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. + repeated string stored_fields = 29; + + // [optional] Point in Time (PIT) lets you run different queries against a dataset that is fixed in time. 
+ PointInTimeReference pit = 30; + + // [optional] Value to associate with the request for additional logging. + repeated string stats = 31; + +} + +message RankContainer { + + oneof rank_container { + RrfRank rrf = 1; + } + +} + +message RrfRank { + + ObjectMap object = 1; + + // How much influence documents in individual result sets per query have over the final ranked result set + .google.protobuf.FloatValue rank_constant = 2; + + // Size of the individual result sets per query + .google.protobuf.FloatValue window_size = 3; + +} + +message RescoreQuery { + + // [required] A second query only on the Top-K results returned by the query and post_filter phases. + QueryContainer rescore_query = 1; + + // [optional] The relative importance of the original query as compared to the rescore query. + .google.protobuf.FloatValue query_weight = 2; + + // [optional] The relative importance of the rescore query as compared to the original query. + .google.protobuf.FloatValue rescore_query_weight = 3; + + enum ScoreMode { + + SCORE_MODE_INVALID = 0; + // Average the original score and the rescore query score. + SCORE_MODE_AVG = 1; + // Take the max of original score and the rescore query score. + SCORE_MODE_MAX = 2; + // Take the min of the original score and the rescore query score. + SCORE_MODE_MIN = 3; + // Multiply the original score by the rescore query score. Useful for function query rescores. + SCORE_MODE_MULTIPLY = 4; + // Add the original score and the rescore query score. The default. + SCORE_MODE_TOTAL = 5; + } + + // [optional] Control the way the scores are combined. + ScoreMode score_mode = 4; + +} + +message Rescore { + + // [required] Contains the rescore_query, which is the secondary query used to adjust the scores of the initial results + RescoreQuery query = 1; + + // [optional] The number of docs which will be examined on each shard can be controlled. 
+ .google.protobuf.Int32Value window_size = 2; + +} + +message SlicedScroll { + + // [optional] Specific document field by which slicing is performed. + .google.protobuf.StringValue field = 1; + + // [required] The id of the slice + .google.protobuf.StringValue id = 2; + + // [required] The maximum number of slices + .google.protobuf.Int32Value max = 3; + +} + +message Suggester { + + // [optional] Global suggest text, to avoid repetition when the same text is used in several suggesters + .google.protobuf.StringValue text = 1; + +} + +message ShardProfile { + + // [required] Profiling information about the aggregation execution. + repeated AggregationProfile aggregations = 1; + + // [required] The shard ID of the shard in the [node-ID][index-name][shard-ID] format. + .google.protobuf.StringValue id = 2; + + // [required] Search represents a query executed against the underlying Lucene index. Most search requests execute a single search against a Lucene index, but some search requests can execute more than one search. For example, including a global aggregation results in a secondary match_all query for the global context. The profile.shards array contains profiling information about each search execution. + repeated SearchProfile searches = 3; + + // [optional] Fetch timing and debug information. 
+ FetchProfile fetch = 4; + +} + +message SuggestArray { + + repeated Suggest suggest_array = 1; + +} + +message Suggest { + oneof suggest { + CompletionSuggest completion_suggest = 1; + PhraseSuggest phrase_suggest = 2; + TermSuggest term_suggest = 3; + } + +} + +message CompletionSuggest { + + .google.protobuf.Int32Value length = 1; + + .google.protobuf.Int32Value offset = 2; + + .google.protobuf.StringValue text = 3; + + repeated CompletionSuggestOption options = 4; + +} + +message CompletionSuggestOption { + + .google.protobuf.BoolValue collate_match = 1; + + map contexts = 2; + + .google.protobuf.Struct fields = 3; + + .google.protobuf.StringValue id = 4 [json_name = "_id"]; + + .google.protobuf.StringValue index = 5 [json_name = "_index"]; + + repeated string routing = 6 [json_name = "_routing"]; + + .google.protobuf.FloatValue underscore_score = 7 [json_name = "_score"]; + + .google.protobuf.Struct source = 8 [json_name = "_source"]; + + .google.protobuf.StringValue text = 9; + + .google.protobuf.FloatValue score = 10; + +} + +message ContextArray { + repeated Context context_array = 1; +} + +message Context { + oneof context { + .google.protobuf.StringValue string_value = 1; + GeoLocation geo_location = 2; + } + +} + + +message PhraseSuggest { + + .google.protobuf.Int32Value length = 1; + + .google.protobuf.Int32Value offset = 2; + + .google.protobuf.StringValue text = 3; + + repeated PhraseSuggestOption options = 4; + +} + +message PhraseSuggestOption { + + .google.protobuf.StringValue text = 1; + + .google.protobuf.FloatValue score = 2; + + .google.protobuf.StringValue highlighted = 3; + + .google.protobuf.BoolValue collate_match = 4; + +} + +message TermSuggestOption { + + .google.protobuf.StringValue text = 1; + + .google.protobuf.FloatValue score = 2; + + .google.protobuf.DoubleValue freq = 3; + + .google.protobuf.StringValue highlighted = 4; + + .google.protobuf.BoolValue collate_match = 5; + +} + +message TermSuggest { + + .google.protobuf.Int32Value 
length = 1; + + .google.protobuf.Int32Value offset = 2; + + .google.protobuf.StringValue text = 3; + + repeated TermSuggestOption options = 4; + +} + +// GET {index}/_explain/{id} +// POST {index}/_explain/{id} +message ExplainRequest { + // Defines the document ID. + .google.protobuf.StringValue id = 1; + // Index names used to limit the request. Only a single index name can be provided to this parameter. + .google.protobuf.StringValue index = 2; + // True or false to return the `_source` field or not, or a list of fields to return. + SourceConfigParam source = 3 [json_name = "_source"]; + // A comma-separated list of source fields to exclude from the response. + repeated string source_excludes = 4; + // A comma-separated list of source fields to include in the response. + repeated string source_includes = 5 ; + // If `true`, wildcard and prefix queries are analyzed. + .google.protobuf.BoolValue analyze_wildcard = 6; + // Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. + .google.protobuf.StringValue analyzer = 7; + // The default operator for query string query: `AND` or `OR`. + Operator default_operator = 8; + enum Operator { + + OPERATOR_INVALID = 0; + OPERATOR_AND = 1; + OPERATOR_OR = 2; + } + // Field to use as default where no field prefix is given in the query string. + .google.protobuf.StringValue df = 9; + // If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + .google.protobuf.BoolValue lenient = 10; + // Specifies the node or shard the operation should be performed on. Random by default. For valid values see "https://opensearch.org/docs/latest/api-reference/search/#the-preference-query-parameter" + .google.protobuf.StringValue preference = 11; + // Query in the Lucene query string syntax. + .google.protobuf.StringValue q = 12; + // Custom value used to route operations to a specific shard. 
+ repeated string routing = 13; + // A comma-separated list of stored fields to return in the response. + repeated string stored_fields = 14; + ExplainRequestBody request_body = 15; +} + +message ExplainRequestBody { + QueryContainer query = 1; +} + +// GET {index}/_explain/{id} response +// POST {index}/_explain/{id} response +message ExplainResponse { + oneof response { + ExplainResponseBody explain_response_body = 1; + ExplainErrorResponse explain_error_response = 2; + } +} + +message ExplainErrorResponse { + OpenSearchException error = 1; + .google.protobuf.Int32Value status = 2; +} + +message ExplainResponseBody { + .google.protobuf.StringValue index = 1 [json_name = "_index"]; + + .google.protobuf.StringValue id = 2 [json_name = "_id"]; + + .google.protobuf.BoolValue matched = 3; + + ExplanationDetail explanation = 4; + + InlineGet get = 5; +} diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java index c34f13041cb11..c64359d775e1b 100644 --- a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java @@ -70,7 +70,7 @@ import static java.net.InetAddress.getByName; import static java.util.Arrays.asList; -import static org.opensearch.http.AbstractHttpServerTransport.resolvePublishPort; +import static org.opensearch.common.network.NetworkService.resolvePublishPort; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -102,21 +102,21 @@ public void testHttpPublishPort() throws Exception { int otherBoundPort = randomIntBetween(9200, 9300); int publishPort = resolvePublishPort( - Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.getKey(), 9080).build(), + 9080, randomAddresses(), getByName("127.0.0.2") ); assertThat("Publish port should be explicitly set to 9080", publishPort, 
equalTo(9080)); publishPort = resolvePublishPort( - Settings.EMPTY, + -1, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort)); publishPort = resolvePublishPort( - Settings.EMPTY, + -1, asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)), getByName("127.0.0.3") ); @@ -125,7 +125,7 @@ public void testHttpPublishPort() throws Exception { final BindHttpException e = expectThrows( BindHttpException.class, () -> resolvePublishPort( - Settings.EMPTY, + -1, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.3") ) @@ -133,7 +133,7 @@ public void testHttpPublishPort() throws Exception { assertThat(e.getMessage(), containsString("Failed to auto-resolve http publish port")); publishPort = resolvePublishPort( - Settings.EMPTY, + -1, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); @@ -141,7 +141,7 @@ public void testHttpPublishPort() throws Exception { if (NetworkUtils.SUPPORTS_V6) { publishPort = resolvePublishPort( - Settings.EMPTY, + -1, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("::1") ); diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index ded457601c0ae..aeeaedc7dd321 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -107,6 +107,7 @@ List adjustNodesStats(List nodesStats) { ), nodeStats.getTransport(), nodeStats.getHttp(), + nodeStats.getGrpc(), nodeStats.getBreaker(), nodeStats.getScriptStats(), nodeStats.getDiscoveryStats(), diff --git 
a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index fa5fb736f518f..3595bf6567de2 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -2753,6 +2753,7 @@ public void ensureEstimatedStats() { false, false, false, + false, false ); assertThat(