Describe the issue:
I am new to opensearch.
I am not able to update the defaultRoute, timepicker:timeDefaults, and other settings under Advanced Settings in OpenSearch Dashboards via a curl command.
Configuration:
I tried the commands below; I got some output, but the page is not redirected to /app/discover, and the setting in the UI still shows /app/home.
curl -u '****:*****' -k -X POST "http://localhost:5601/api/saved_objects/config" -H "osd-xsrf: true" -H "content-type: application/json; charset=utf-8" -H "securitytenant: global" -d '{"attributes": {"defaultRoute": "/app/discover"}}'
{"type":"config","id":"81288fd0-658c-11ee-b242-b3c553df404d","attributes":{"defaultRoute":"/app/discover"},"references":[],"migrationVersion":{"config":"7.9.0"},"updated_at":"2023-10-08T03:41:03.181Z","version":"WzE2NCw0XQ==","namespaces":["default"]}
I searched on the internet and found another command, but it gave me an error:
curl -u ':' -k -X POST "http://localhost:5601/api/opensearch-dashboards/settings" -H "osd-xsrf: true" -H "content-type: application/json; charset=utf-8" -H "securitytenant: global" -d '
{
  "attributes": {
    "defaultRoute": "/app/discover"
  }
}'
Relevant Logs or Screenshots:
1 post - 1 participant
Describe the issue:
+Is it feasible to utilize distinct root CA certificates for transport-layer and REST-layer communications? Specifically, I’d like to implement a self-signed root CA certificate for transport-layer (node-to-node) communication, while employing a trusted Certificate Authority for the REST-layer (client-to-node) traffic.
Can this configuration be achieved? Additionally, is it possible to utilize the securityadmin.sh script to update the security settings accordingly?
Configuration:
+Docker-Compose
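The security plugin configures transport-layer and REST-layer TLS with separate settings, so different trust chains can in principle be used. A minimal opensearch.yml sketch, assuming the certificate file names below are placeholders for your own files:
plugins.security.ssl.transport.pemcert_filepath: node-transport.pem
plugins.security.ssl.transport.pemkey_filepath: node-transport-key.pem
plugins.security.ssl.transport.pemtrustedcas_filepath: selfsigned-root-ca.pem
plugins.security.ssl.http.enabled: true
plugins.security.ssl.http.pemcert_filepath: node-http.pem
plugins.security.ssl.http.pemkey_filepath: node-http-key.pem
plugins.security.ssl.http.pemtrustedcas_filepath: trusted-public-root-ca.pem
securityadmin.sh authenticates with the admin client certificate, so it should keep working as long as that certificate chains to a CA the layer it connects over trusts.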
1 post - 1 participant
Error: "WARN com.amazon.dataprepper.plugins.sink.opensearch.OpenSearchSink - Document [org.opensearch.client.opensearch.core.bulk.BulkOperation@c4aecd] has failure: java.lang.RuntimeException: only write ops with an op_type of create are allowed in data streams"
Configuration:
data-prepper:latest, OpenSearch v2.10.0
kafka-sysmon_security_eventlog-pipeline:
+ source:
+ kafka:
+ acknowledgments: true
+ encryption:
+ type: none
+ bootstrap_servers:
+ - xxxxxxxx:9094
+ - xxxxxxxx:9095
+ - xxxxxxxx:9096
+ topics:
+ - name: "sysmon_security_eventlog"
+ group_id: "data_prepper"
+ key_mode: "discard"
+ serde_format: "json"
+ auto_commit: true
+ processor:
+ - aggregate:
+ identification_keys: ["event.provider","event.code","event.outcome","host.name","winlog.event_data.AuthenticationPackageName","winlog.event_data.TargetDomainName","winlog.event_data.TargetUserName","winlog.event_data.TargetUserSid","winlog.event_data.WorkstationName"]
+ action:
+ remove_duplicates:
+ group_duration: 30s
+ sink:
+ - opensearch:
+ hosts: ["https://xxxxxxxx:9200"]
+ username: xxxxxxxx
+ password: xxxxxxxx
+ insecure: true
+ connect_timeout: 60000
+ index: logs-events-sysmon_security_eventlog
+ index_type: management_disabled
+
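Regarding the data stream error above: if the Data Prepper version in use exposes the opensearch sink's action option, asking the sink to issue create operations (rather than the default index operation) is the usual way to satisfy the data-stream restriction. A sketch, assuming that option is available in the data-prepper:latest build you are running:
  sink:
    - opensearch:
        hosts: ["https://xxxxxxxx:9200"]
        index: logs-events-sysmon_security_eventlog
        index_type: management_disabled
        action: create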
Is there any progress?
The bug has been open for almost a year and nothing has moved, yet this feature is very much needed.
+Or is there some workaround?
1 post - 1 participant
Describe the issue:
+Currently we have Elasticsearch set up, but we are moving to OpenSearch. I’ve set up OpenSearch output following the guide, but OpenSearch is only receiving logs once per hour, while Elasticsearch receives them almost constantly.
The Logstash logs don’t seem out of the ordinary, so I am not sure if this is an issue with the OpenSearch Output setup, or some OpenSearch config I missed.
Configuration:
+Logstash Output:
output {
+ if "event-timed-out" in [tags] {
+ elasticsearch {
+ id => "fallback-elasticsearch-output"
+ hosts => ["localhost:9200"]
+ index => "fallback-bisappslogs-%{+YYYY.MM.dd}"
+ }
+ opensearch {
+ index => "fallback-bisappslogs-%{+YYYY.MM.dd}"
+ hosts => ["host"]
+ user => "${}"
+ password => "${}"
+ ssl_certificate_verification => true
+ }
+ } else {
+ elasticsearch {
+ id => "elasticsearch-output"
+ hosts => ["localhost:9200"]
+ index => "bisappslogs-%{+YYYY.MM.dd}"
+ }
+ opensearch {
+ index => "bisappslogs-%{+YYYY.MM.dd}"
+ hosts => ["host"]
+ user => "${}"
+ password => "${}"
+ ssl_certificate_verification => true
+ }
+ }
+}
+
Relevant Logs or Screenshots:
3 posts - 2 participants
Describe the issue:
I'm trying to create a new Logstash pipeline which uses the OpenSearch input plugin. It seems a fairly simple setup, but for some reason I'm getting the following error:
PKIX path building failed, unable to find valid certificate path to requested target
I have other OpenSearch outputs that are working, so I'm a bit confused about why this is broken.
Any ideas on what to check would be appreciated.
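A PKIX error generally means the JVM running Logstash does not trust the CA that signed the cluster's certificate. One common workaround is to import that CA into the truststore of the JDK Logstash uses (the truststore path below is a placeholder; check where your Logstash install keeps its bundled JDK), or to point the input plugin at the CA file if your plugin version exposes such an option:
keytool -importcert -trustcacerts -alias opensearch-root-ca -file /path/to/root-ca.pem -keystore /usr/share/logstash/jdk/lib/security/cacerts -storepass changeit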
7 posts - 2 participants
Describe the issue:
I have an OpenSearch Service instance on AWS with 2 nodes, and I created an index with a k-NN field. My queries take 10 seconds.
The index contains more than 2 million documents with a total size of 11 GB.
How can I improve the k-NN search?
Configuration:
+Schema:
{
+ "settings": {
+ "index": {
+ "number_of_shards": 2,
+ "number_of_replicas": 0,
+ "knn": true,
+ "knn.algo_param.ef_search": 512
+ }
+ },
+ "mappings": {
+ "dynamic": "false",
+ "properties": {
+ "title": {
+ "type": "text"
+ },
+ "text": {
+ "type": "text"
+      },
+ "vector": {
+ "type": "knn_vector",
+ "dimension": 256,
+ "method": {
+ "name": "hnsw",
+ "space_type": "l2",
+ "engine": "nmslib",
+ "parameters": {
+ "ef_construction": 2000,
+ "m": 16
+ }
+ }
+ }
+ }
+ }
+}
+
Query:
{
+ "highlight": {
+ "fields": {
+ "text": {}
+ },
+ "fragment_size": 1000,
+ "fragmenter": "simple",
+ "highlight_query": {
+ "match": {
+ "text": "Why my OpenSearch vector search is slow?"
+ }
+ },
+ "no_match_size": 1000,
+ "number_of_fragments": 5,
+ "post_tags": "</em>",
+ "pre_tags": "<em>"
+ },
+ "_source": [
+ "title",
+ "text"
+ ],
+ "size": 100,
+ "query": {
+ "knn": {
+ "vector": {
+ "k": 100,
+ "vector": [0.06414795,-0.033294678,...,-0.06677246]
+ }
+ }
+ }
+}
+
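Two things that often help with slow k-NN queries, sketched below: warming the graphs into memory with the k-NN warmup API before querying, and lowering ef_search (512 is fairly high for a first pass). The index name is a placeholder:
GET /_plugins/_knn/warmup/my-vector-index
PUT /my-vector-index/_settings
{
  "index.knn.algo_param.ef_search": 100
}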
Relevant Logs or Screenshots:
1 post - 1 participant
Describe the issue:
I have an OpenSearch Service instance on AWS with 2 nodes. During each indexing run with k-NN vectors, one node was lost near the end of indexing. Thanks to the automatic remediation of red clusters, the lost node was restored 30 minutes later.
The vector index contains some text fields and a k-NN field, with more than 2 million documents; the total index size is 11 GB.
I have a Python 3 program that gets data from an index with scroll and runs the vector indexing with _bulk.
How can I avoid losing a node?
Configuration:
OpenSearch Service instance: type c6g.xlarge.search on AWS, 2 nodes, each with 4 vCPU, 8 GB RAM, 200 GB storage, 6000 IOPS, and 256 MB/s.
Schema:
{
+ "settings": {
+ "index": {
+ "number_of_shards": 2,
+ "number_of_replicas": 0,
+ "knn": true,
+ "knn.algo_param.ef_search": 512
+ }
+ },
+ "mappings": {
+ "dynamic": "false",
+ "properties": {
+ "title": {
+ "type": "text"
+ },
+ "text": {
+ "type": "text"
+      },
+ "vector": {
+ "type": "knn_vector",
+ "dimension": 256,
+ "method": {
+ "name": "hnsw",
+ "space_type": "l2",
+ "engine": "nmslib",
+ "parameters": {
+ "ef_construction": 2000,
+ "m": 16
+ }
+ }
+ }
+ }
+ }
+}
+
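A common mitigation during heavy k-NN bulk indexing is to pause refreshes while the bulk runs and restore them afterwards, so graph building does not compete with searches; lowering ef_construction from 2000 also reduces the build cost considerably. A sketch, with the index name as a placeholder:
PUT /my-vector-index/_settings
{ "index": { "refresh_interval": "-1" } }
(run the _bulk indexing)
PUT /my-vector-index/_settings
{ "index": { "refresh_interval": "30s" } }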
Relevant Logs or Screenshots:
IndexingRate (screenshot)
1 post - 1 participant
So, my questions:
Is it hopeless to continue with Filebeat?
When OpenSearch forked, did they also fork a version of FileBeat?
If we have to give up on Filebeat, could anyone point me to other options? Any recommendations? Our Filebeat needs are fairly simple. We want something light-weight that we can get up and running quickly.
THANK YOU!!
5 posts - 2 participants
{ "size": 1000, "query": { "bool": { "filter": [ { "term": { "module": { "value": "ncs", "boost": 1 } } } ], "adjust_pure_negative": true, "boost": 1 } }, "_source": { "includes": [ "module", "descp", "_id" ], "excludes": [] }, "sort": [ { "module": { "order": "asc" } } ] }
I would like to know if the new generic client will support json requests that specify _source “includes” or “excludes” as an option to defining the return fields via a Java class, and if a generic client is planned before HLRC support is removed in the 4.0 release. This approach would remove the need to filter out unwanted class fields and reduce bandwidth costs over time as well.
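For what it's worth, the opensearch-java client already lets you express _source filtering on the request itself. A minimal sketch, assuming a 2.x opensearch-java client that is already constructed (the index name is hypothetical, the field list is taken from the query above, and exact builder names may differ slightly between client versions):
import java.util.List;
import java.util.Map;
import org.opensearch.client.opensearch.OpenSearchClient;
import org.opensearch.client.opensearch.core.SearchRequest;
import org.opensearch.client.opensearch.core.SearchResponse;

// `client` is assumed to be an already-initialized OpenSearchClient.
SearchRequest request = SearchRequest.of(s -> s
    .index("my-index")                           // hypothetical index name
    .size(1000)
    .source(src -> src.filter(f -> f
        .includes(List.of("module", "descp"))    // _source includes
        .excludes(List.of()))));                 // _source excludes
SearchResponse<Map> response = client.search(request, Map.class);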
1 post - 1 participant
Describe the issue:
I set up a docker-compose file to try to send Node.js traces to OpenSearch using OpenTelemetry and Data Prepper, but I keep getting error messages from OpenTelemetry that it is not able to connect to Data Prepper.
Data Prepper is also displaying an error about not being able to create the OpenSearch sink connector: Caused by: java.io.IOException: opensearch: Name or service not known
I tried changing the name of the service in the data-prepper config but that did not work.
Configuration:
Here are the docker-compose files for OpenTelemetry, Data Prepper, OpenSearch, and OpenSearch Dashboards:
version: "3"
+
+services:
+ otel-collector:
+ image: otel/opentelemetry-collector-contrib:0.59.0
+ command:
+ - "--config"
+ - "/otel-local-config.yaml"
+ volumes:
+ - ./tracetest/collector.config.yaml:/otel-local-config.yaml
+ depends_on:
+ data-prepper:
+ condition: service_started
+
+ data-prepper:
+ restart: unless-stopped
+ image: opensearchproject/data-prepper:1.5.1
+ volumes:
+ - ./tracetest/opensearch/opensearch-analytics.yaml:/usr/share/data-prepper/pipelines.yaml
+ - ./tracetest/opensearch/opensearch-data-prepper-config.yaml:/usr/share/data-prepper/data-prepper-config.yaml
+ depends_on:
+ opensearch:
+ condition: service_started
+
+ opensearch:
+ # container_name: node-3.example.com
+ image: opensearchproject/opensearch:2.10.0
+ environment:
+ - discovery.type=single-node
+ - bootstrap.memory_lock=true
+ - OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m
+ ulimits:
+ memlock:
+ soft: -1
+ hard: -1
+ nofile:
+ soft: 65536
+ hard: 65536
+ ports:
+ - "9200:9200"
+ - "9600:9600"
+ # healthcheck:
+ # test: curl -s http://localhost:9200 >/dev/null || exit 1
+ # interval: 5s
+ # timeout: 10s
+ # retries: 5
+ networks:
+ - my_network
+
+ dashboards:
+ image: opensearchproject/opensearch-dashboards:2.10.0
+ container_name: opensearch-dashboards3
+ ports:
+ - "5601:5601"
+ expose:
+ - "5601"
+ environment:
+ OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
+ depends_on:
+ - opensearch
+ networks:
+ - my_network
+
+networks:
+ my_network:
+
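Based on this compose file, a likely cause of the "opensearch: Name or service not known" error is that data-prepper and otel-collector are not attached to my_network, while opensearch is; containers on different Docker networks cannot resolve each other's service names. A sketch of the missing pieces (keep the rest of each service definition as-is):
  data-prepper:
    networks:
      - my_network
  otel-collector:
    networks:
      - my_network
The Node.js app's compose file below also defines its own separate my_network, so it would additionally need to join the same (for example, externally defined) network to reach the collector by name.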
The node.js app is in a separate docker-compose.yml file below:
version: '3'
+services:
+ app:
+ image: quick-start-nodejs
+ build: .
+ ports:
+ - "8080:8080"
+ networks:
+ - my_network
+
+networks:
+ my_network:
+
+
this is the Dockerfile also for the node.js app:
FROM node:slim
+WORKDIR /usr/src/app
+COPY package*.json ./
+RUN npm install
+COPY . .
+EXPOSE 8080
+CMD [ "npm", "run", "with-grpc-tracer" ]
+
+
I used the Tracetest document "Running Tracetest with OpenSearch" to get things up and running, but that configuration did not include OpenSearch Dashboards, so I tried to add opensearch-dashboards myself. However, authorization and authentication were disabled in that configuration, so I was unable to log in to the OpenSearch web admin interface. In the end I decided to write my own docker-compose from scratch, which led to the errors above.
Relevant Logs or Screenshots:
+data prepper docker logs
2023-10-05 13:55:29 WARNING: sun.reflect.Reflection.getCallerClass is not supported. This will impact performance.
+2023-10-05 13:55:50 2023-10-05T11:55:50,660 [main] INFO com.amazon.dataprepper.parser.config.DataPrepperAppConfiguration - Command line args: /usr/share/data-prepper/pipelines.yaml,/usr/share/data-prepper/data-prepper-config.yaml
+2023-10-05 13:55:50 2023-10-05T11:55:50,695 [main] INFO com.amazon.dataprepper.parser.config.DataPrepperArgs - Using /usr/share/data-prepper/pipelines.yaml configuration file
+2023-10-05 13:56:32 2023-10-05T11:56:32,488 [main] WARN com.amazon.dataprepper.parser.model.PipelineConfiguration - Prepper configurations are deprecated, processor configurations will be required in Data Prepper 2.0
+2023-10-05 13:56:32 2023-10-05T11:56:32,559 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building pipeline [entry-pipeline] from provided configuration
+2023-10-05 13:56:32 2023-10-05T11:56:32,570 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building [otel_trace_source] as source component for the pipeline [entry-pipeline]
+2023-10-05 13:56:33 2023-10-05T11:56:33,168 [main] WARN com.amazon.dataprepper.plugins.source.oteltrace.OTelTraceSource - Creating otel-trace-source without authentication. This is not secure.
+2023-10-05 13:56:33 2023-10-05T11:56:33,170 [main] WARN com.amazon.dataprepper.plugins.source.oteltrace.OTelTraceSource - In order to set up Http Basic authentication for the otel-trace-source, go here: https://github.com/opensearch-project/data-prepper/tree/main/data-prepper-plugins/otel-trace-source#authentication-configurations
+2023-10-05 13:56:33 2023-10-05T11:56:33,187 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building buffer for the pipeline [entry-pipeline]
+2023-10-05 13:56:33 2023-10-05T11:56:33,491 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building processors for the pipeline [entry-pipeline]
+2023-10-05 13:56:33 2023-10-05T11:56:33,500 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building sinks for the pipeline [entry-pipeline]
+2023-10-05 13:56:33 2023-10-05T11:56:33,506 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building [pipeline] as sink component
+2023-10-05 13:56:33 2023-10-05T11:56:33,528 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building pipeline [raw-pipeline] from provided configuration
+2023-10-05 13:56:33 2023-10-05T11:56:33,531 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building [pipeline] as source component for the pipeline [raw-pipeline]
+2023-10-05 13:56:33 2023-10-05T11:56:33,533 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building buffer for the pipeline [raw-pipeline]
+2023-10-05 13:56:33 2023-10-05T11:56:33,564 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building processors for the pipeline [raw-pipeline]
+2023-10-05 13:56:33 2023-10-05T11:56:33,573 [main] WARN com.amazon.dataprepper.parser.PipelineParser - No plugin of type Processor found for plugin setting: otel_trace_raw_prepper, attempting to find comparable Prepper plugin.
+2023-10-05 13:56:33 2023-10-05T11:56:33,745 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building sinks for the pipeline [raw-pipeline]
+2023-10-05 13:56:33 2023-10-05T11:56:33,751 [main] INFO com.amazon.dataprepper.parser.PipelineParser - Building [opensearch] as sink component
+2023-10-05 13:56:34 2023-10-05T11:56:34,109 [main] INFO com.amazon.dataprepper.plugins.sink.opensearch.OpenSearchSink - Initializing OpenSearch sink
+2023-10-05 13:56:36 2023-10-05T11:56:36,857 [main] INFO com.amazon.dataprepper.plugins.sink.opensearch.ConnectionConfiguration - Using the trust all strategy
+2023-10-05 13:56:38 2023-10-05T11:56:38,087 [main] ERROR com.amazon.dataprepper.plugin.PluginCreator - Encountered exception while instantiating the plugin OpenSearchSink
+2023-10-05 13:56:38 java.lang.reflect.InvocationTargetException: null
+2023-10-05 13:56:38 at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
+2023-10-05 13:56:38 at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:64) ~[?:?]
+2023-10-05 13:56:38 at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
+2023-10-05 13:56:38 at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:500) ~[?:?]
+2023-10-05 13:56:38 at java.lang.reflect.Constructor.newInstance(Constructor.java:481) ~[?:?]
+2023-10-05 13:56:38 at com.amazon.dataprepper.plugin.PluginCreator.newPluginInstance(PluginCreator.java:40) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.plugin.DefaultPluginFactory.loadPlugin(DefaultPluginFactory.java:66) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.parser.PipelineParser.buildSinkOrConnector(PipelineParser.java:180) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:195) ~[?:?]
+2023-10-05 13:56:38 at java.util.ArrayList$ArrayListSpliterator.forEachRemaining(ArrayList.java:1625) ~[?:?]
+2023-10-05 13:56:38 at java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:484) ~[?:?]
+2023-10-05 13:56:38 at java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:474) ~[?:?]
+2023-10-05 13:56:38 at java.util.stream.ReduceOps$ReduceOp.evaluateSequential(ReduceOps.java:913) ~[?:?]
+2023-10-05 13:56:38 at java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234) ~[?:?]
+2023-10-05 13:56:38 at java.util.stream.ReferencePipeline.collect(ReferencePipeline.java:578) ~[?:?]
+2023-10-05 13:56:38 at com.amazon.dataprepper.parser.PipelineParser.buildPipelineFromConfiguration(PipelineParser.java:107) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.parser.PipelineParser.parseConfiguration(PipelineParser.java:72) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.DataPrepper.<init>(DataPrepper.java:57) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
+2023-10-05 13:56:38 at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:64) ~[?:?]
+2023-10-05 13:56:38 at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
+2023-10-05 13:56:38 at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:500) ~[?:?]
+2023-10-05 13:56:38 at java.lang.reflect.Constructor.newInstance(Constructor.java:481) ~[?:?]
+2023-10-05 13:56:38 at org.springframework.beans.BeanUtils.instantiateClass(BeanUtils.java:211) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.SimpleInstantiationStrategy.instantiate(SimpleInstantiationStrategy.java:117) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.instantiate(ConstructorResolver.java:311) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.autowireConstructor(ConstructorResolver.java:296) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.autowireConstructor(AbstractAutowireCapableBeanFactory.java:1372) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:1222) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:582) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:542) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:335) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:333) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:208) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1391) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1311) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:887) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:791) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.instantiateUsingFactoryMethod(ConstructorResolver.java:541) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.instantiateUsingFactoryMethod(AbstractAutowireCapableBeanFactory.java:1352) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:1195) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:582) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:542) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:335) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:333) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:208) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1391) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1311) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:887) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:791) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.instantiateUsingFactoryMethod(ConstructorResolver.java:541) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.instantiateUsingFactoryMethod(AbstractAutowireCapableBeanFactory.java:1352) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:1195) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:582) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:542) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:335) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:333) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:208) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1391) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1311) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:887) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:791) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.ConstructorResolver.autowireConstructor(ConstructorResolver.java:229) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.autowireConstructor(AbstractAutowireCapableBeanFactory.java:1372) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:1222) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:582) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:542) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:335) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:333) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:208) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:955) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:918) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:583) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.ContextManager.<init>(ContextManager.java:48) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.DataPrepperExecute.main(DataPrepperExecute.java:22) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 Caused by: java.lang.RuntimeException: opensearch: Name or service not known
+2023-10-05 13:56:38 at com.amazon.dataprepper.plugins.sink.opensearch.OpenSearchSink.<init>(OpenSearchSink.java:92) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 ... 82 more
+2023-10-05 13:56:38 Caused by: java.io.IOException: opensearch: Name or service not known
+2023-10-05 13:56:38 at org.opensearch.client.RestClient.extractAndWrapCause(RestClient.java:912) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.opensearch.client.RestClient.performRequest(RestClient.java:301) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.opensearch.client.RestClient.performRequest(RestClient.java:289) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.opensearch.client.RestHighLevelClient.internalPerformRequest(RestHighLevelClient.java:1762) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.opensearch.client.RestHighLevelClient.performRequest(RestHighLevelClient.java:1728) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.opensearch.client.RestHighLevelClient.performRequestAndParseEntity(RestHighLevelClient.java:1696) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at org.opensearch.client.ClusterClient.getSettings(ClusterClient.java:119) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.plugins.sink.opensearch.index.IndexManager.checkISMEnabled(IndexManager.java:149) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.plugins.sink.opensearch.index.IndexManager.checkAndCreateIndexTemplate(IndexManager.java:165) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.plugins.sink.opensearch.index.IndexManager.setupIndex(IndexManager.java:160) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.plugins.sink.opensearch.OpenSearchSink.initialize(OpenSearchSink.java:105) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 at com.amazon.dataprepper.plugins.sink.opensearch.OpenSearchSink.<init>(OpenSearchSink.java:89) ~[data-prepper.jar:1.5.1]
+2023-10-05 13:56:38 ... 82 more
+
+
opentelemetry docker logs:
2023-10-05 13:55:25 2023/10/05 11:55:25 proto: duplicate proto type registered: jaeger.api_v2.PostSpansRequest
+2023-10-05 13:55:25 2023/10/05 11:55:25 proto: duplicate proto type registered: jaeger.api_v2.PostSpansResponse
+2023-10-05 13:55:25 2023-10-05T11:55:25.683Zinfoservice/telemetry.go:115Setting up own telemetry...
+2023-10-05 13:55:25 2023-10-05T11:55:25.685Zinfoservice/telemetry.go:156Serving Prometheus metrics{"address": ":8888", "level": "basic"}
+2023-10-05 13:55:25 2023-10-05T11:55:25.692Zinfoservice/service.go:112Starting otelcol-contrib...{"Version": "0.59.0", "NumCPU": 4}
+2023-10-05 13:55:25 2023-10-05T11:55:25.693Zinfoextensions/extensions.go:42Starting extensions...
+2023-10-05 13:55:25 2023-10-05T11:55:25.696Zinfopipelines/pipelines.go:74Starting exporters...
+2023-10-05 13:55:25 2023-10-05T11:55:25.697Zinfopipelines/pipelines.go:78Exporter is starting...{"kind": "exporter", "data_type": "traces", "name": "otlp/2"}
+2023-10-05 13:55:25 2023-10-05T11:55:25.708Zinfopipelines/pipelines.go:82Exporter started.{"kind": "exporter", "data_type": "traces", "name": "otlp/2"}
+2023-10-05 13:55:25 2023-10-05T11:55:25.711Zinfopipelines/pipelines.go:86Starting processors...
+2023-10-05 13:55:25 2023-10-05T11:55:25.713Zinfopipelines/pipelines.go:90Processor is starting...{"kind": "processor", "name": "batch", "pipeline": "traces"}
+2023-10-05 13:55:25 2023-10-05T11:55:25.713Zinfopipelines/pipelines.go:94Processor started.{"kind": "processor", "name": "batch", "pipeline": "traces"}
+2023-10-05 13:55:25 2023-10-05T11:55:25.713Zinfopipelines/pipelines.go:98Starting receivers...
+2023-10-05 13:55:25 2023-10-05T11:55:25.713Zinfopipelines/pipelines.go:102Receiver is starting...{"kind": "receiver", "name": "otlp", "pipeline": "traces"}
+2023-10-05 13:55:25 2023-10-05T11:55:25.714Zinfootlpreceiver/otlp.go:70Starting GRPC server on endpoint 0.0.0.0:4317{"kind": "receiver", "name": "otlp", "pipeline": "traces"}
+2023-10-05 13:55:25 2023-10-05T11:55:25.715Zinfootlpreceiver/otlp.go:88Starting HTTP server on endpoint 0.0.0.0:4318{"kind": "receiver", "name": "otlp", "pipeline": "traces"}
+2023-10-05 13:55:25 2023-10-05T11:55:25.715Zinfopipelines/pipelines.go:106Receiver started.{"kind": "receiver", "name": "otlp", "pipeline": "traces"}
+2023-10-05 13:55:25 2023-10-05T11:55:25.715Zinfoservice/service.go:129Everything is ready. Begin running and processing data.
+2023-10-05 13:55:25 2023-10-05T11:55:25.759Zwarnzapgrpc/zapgrpc.go:191[core] [Channel #1 SubChannel #2] grpc: addrConn.createTransport failed to connect to {
+2023-10-05 13:55:25 "Addr": "data-prepper:21890",
+2023-10-05 13:55:25 "ServerName": "data-prepper:21890",
+2023-10-05 13:55:25 "Attributes": null,
+2023-10-05 13:55:25 "BalancerAttributes": null,
+2023-10-05 13:55:25 "Type": 0,
+2023-10-05 13:55:25 "Metadata": null
+2023-10-05 13:55:25 }. Err: connection error: desc = "transport: Error while dialing dial tcp 192.168.32.2:21890: connect: connection refused"{"grpc_log": true}
+
1 post - 1 participant
Describe the issue:
We have been working with the LTR plugin for search relevancy. In this context, the LTR workflow requires that:
The feature set needs to be created using the API
The logging feature has to be invoked using the API
Accordingly, the judgment list needs to be created
The model (e.g. XGBoost) has to be trained
The model needs to be uploaded.
We wanted to understand whether, with the model serving framework in place, all the LTR steps can be done using the model serving framework and pipeline. If yes, can you refer to an example?
If not, are there any plans for this down the line?
Configuration:
Relevant Logs or Screenshots:
1 post - 1 participant
I have been testing the Logstash s3snssqs plugin to fetch logs from S3 via SQS.
Earlier the stack was s3 ← sqs ← filebeat → logstash → opensearch.
Now we are trying to eliminate Filebeat and want to grab logs through the input plugin.
It worked fine in the dev (test) environment, and now I am trying to implement the same in the QA environment. However, with the s3snssqs plugin configured as input we are receiving log counts only in the hundreds (the Filebeat service was stopped at this point), whereas with the Filebeat config we receive the expected volume (at least 1k logs per minute).
Has anyone faced this type of issue with this plugin (s3snssqs), or is something happening in the backend between SQS and the plugin trying to fetch the logs?
Here’s the sample snippet of the logstash config input section
s3snssqs {
+ region => "us-west-1"
+ from_sns => false
+ consumer_threads => 2
+ s3_default_options => { "endpoint_discovery" => true }
+ queue => "n11-us-va-03-alb-logs-message-queue"
+ access_key_id => "#########"
+ secret_access_key => "#############"
+ type => "sqs-logs"
+ tags => ["alb-logs"]
+ sqs_skip_delete => false
+ codec => json
+ s3_options_by_bucket => [
+ { bucket_name => "n11-us-va-03-alb-logs-message-queue"
+ folders => [
+ { key => ".*/alb.*"
+ codec => "json_stream"
+ type => "ALB"}
+ ]
+ }
+ ]
+ }
+
Can anyone please help me with this? Thank you.
1 post - 1 participant
When I go to my index pattern in Dashboards, it says that there are 12 mapping conflicts (sometimes that number changes). With earlier versions of Kibana, typing "conflict" would display the affected fields. I tried the same thing in Dashboards, but no dice. Is there a way to identify them? I have 1,400 fields in my index.
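One way to identify the conflicting fields is the field capabilities API, which reports the mapped type of every field for each index behind the pattern; fields that come back with more than one type across those indices are the conflicts. The index pattern below is a placeholder:
GET my-index-pattern-*/_field_caps?fields=*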
2 posts - 2 participants
I installed the latest version using yum and changed the 9200 port to 9201 to allow nginx. Otherwise, it's basically out of the box.
Both OpenSearch and Dashboards start. But Dashboards can’t connect to OpenSearch. I don’t know what other configuration is needed.
Any help would be greatly appreciated!
This is the Dashboard log:
Oct 04 19:15:52 myserver opensearch-dashboards[110119]: {"type":"log","@timestamp":"2023-10-04T19:15:52Z","tags":["warning","savedobjects-service"],"pid":110119,"message":"Unable to connect to OpenSearch. Error: Given the configuration, the ConnectionPool was not able to find a usable Connection for this request."}
+Oct 04 19:37:28 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:28Z","tags":["info","plugins-service"],"pid":115045,"message":"Plugin \"dataSourceManagement\" has been disabled since the following direct or transitive dependencies are missing or disabled: [dataSource]"}
+Oct 04 19:37:28 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:28Z","tags":["info","plugins-service"],"pid":115045,"message":"Plugin \"dataSource\" is disabled."}
+Oct 04 19:37:28 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:28Z","tags":["info","plugins-service"],"pid":115045,"message":"Plugin \"visTypeXy\" is disabled."}
+Oct 04 19:37:28 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:28Z","tags":["warning","config","deprecation"],"pid":115045,"message":"\"opensearch.requestHeadersWhitelist\" is deprecated and has been replaced by \"opensearch.requestHeadersAllowlist\""}
+Oct 04 19:37:28 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:28Z","tags":["info","plugins-system"],"pid":115045,"message":"Setting up [51] plugins: [usageCollection,opensearchDashboardsUsageCollection,opensearchDashboardsLegacy,mapsLegacy,share,opensearchUiShared,legacyExport,embeddable,expressions,data,securityAnalyticsDashboards,home,apmOss,savedObjects,searchRelevanceDashboards,reportsDashboards,dashboard,mlCommonsDashboards,visualizations,visTypeVega,visTypeTimeline,visTypeTable,visTypeMarkdown,visBuilder,visAugmenter,anomalyDetectionDashboards,alertingDashboards,tileMap,regionMap,customImportMapDashboards,inputControlVis,ganttChartDashboards,visualize,queryWorkbenchDashboards,indexManagementDashboards,notificationsDashboards,management,indexPatternManagement,advancedSettings,console,dataExplorer,charts,visTypeVislib,visTypeTimeseries,visTypeTagcloud,visTypeMetric,observabilityDashboards,discover,savedObjectsManagement,securityDashboards,bfetch]"}
+Oct 04 19:37:29 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:29Z","tags":["info","savedobjects-service"],"pid":115045,"message":"Waiting until all OpenSearch nodes are compatible with OpenSearch Dashboards before starting saved objects migrations..."}
+Oct 04 19:37:29 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:29Z","tags":["error","opensearch","data"],"pid":115045,"message":"[ConnectionError]: write EPROTO 140466277648256:error:1408F10B:SSL routines:ssl3_get_record:wrong version number:../deps/openssl/openssl/ssl/record/ssl3_record.c:332:\n"}
+Oct 04 19:37:29 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:29Z","tags":["error","savedobjects-service"],"pid":115045,"message":"Unable to retrieve version information from OpenSearch nodes."}
+Oct 04 19:37:31 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:31Z","tags":["error","opensearch","data"],"pid":115045,"message":"[ConnectionError]: write EPROTO 140466277648256:error:1408F10B:SSL routines:ssl3_get_record:wrong version number:../deps/openssl/openssl/ssl/record/ssl3_record.c:332:\n"}
+Oct 04 19:37:34 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:34Z","tags":["error","opensearch","data"],"pid":115045,"message":"[ConnectionError]: write EPROTO 140466277648256:error:1408F10B:SSL routines:ssl3_get_record:wrong version number:../deps/openssl/openssl/ssl/record/ssl3_record.c:332:\n"}
+Oct 04 19:37:36 myserver opensearch-dashboards[115045]: {"type":"log","@timestamp":"2023-10-04T19:37:36Z","tags":["error","opensearch","data"],"pid":115045,"message":"[ConnectionError]: write EPROTO 140466277648256:error:1408F10B:SSL routines:ssl3_get_record:wrong version number:../deps/openssl/openssl/ssl/record/ssl3_record.c:332:\n"}
+
+
This is the opensearch.yml:
cluster.name: skynet-elk
+path.data: /ds1/opensearch
+path.logs: /var/log/opensearch
+http.port: 9201
+
+plugins.security.ssl.transport.pemcert_filepath: esnode.pem
+plugins.security.ssl.transport.pemkey_filepath: esnode-key.pem
+plugins.security.ssl.transport.pemtrustedcas_filepath: root-ca.pem
+plugins.security.ssl.transport.enforce_hostname_verification: false
+plugins.security.ssl.http.enabled: true
+plugins.security.ssl.http.pemcert_filepath: esnode.pem
+plugins.security.ssl.http.pemkey_filepath: esnode-key.pem
+plugins.security.ssl.http.pemtrustedcas_filepath: root-ca.pem
+plugins.security.allow_unsafe_democertificates: true
+plugins.security.allow_default_init_securityindex: true
+plugins.security.authcz.admin_dn:
+ - CN=kirk,OU=client,O=client,L=test, C=de
+
+plugins.security.audit.type: internal_opensearch
+plugins.security.enable_snapshot_restore_privilege: true
+plugins.security.check_snapshot_restore_write_privileges: true
+plugins.security.restapi.roles_enabled: ["all_access", "security_rest_api_access"]
+plugins.security.system_indices.enabled: true
+plugins.security.system_indices.indices: [".plugins-ml-config", ".plugins-ml-connector", ".plugins-ml-model-group", ".plugins-ml-model", ".plugins-ml-task", ".plugins-ml-conversation-meta", ".plugins-ml-conversation-interactions", ".opendistro-alerting-config", ".opendistro-alerting-alert*", ".opendistro-anomaly-results*", ".opendistro-anomaly-detector*", ".opendistro-anomaly-checkpoints", ".opendistro-anomaly-detection-state", ".opendistro-reports-*", ".opensearch-notifications-*", ".opensearch-notebooks", ".opensearch-observability", ".ql-datasources", ".opendistro-asynchronous-search-response*", ".replication-metadata-store", ".opensearch-knn-models", ".geospatial-ip2geo-data*"]
+node.max_local_storage_nodes: 3
+
And this is the opensearch-dashboards.yml:
---
+opensearch.hosts: [https://localhost:9200]
+opensearch.ssl.verificationMode: none
+opensearch.username: kibanaserver
+opensearch.password: kibanaserver
+opensearch.requestHeadersWhitelist: [authorization, securitytenant]
+opensearch_security.multitenancy.enabled: true
+opensearch_security.multitenancy.tenants.preferred: [Private, Global]
+opensearch_security.readonly_mode.roles: [kibana_read_only]
+opensearch_security.cookie.secure: false
+
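Given the two files above, Dashboards is pointed at https://localhost:9200 while opensearch.yml sets http.port: 9201, so the "wrong version number" EPROTO error is consistent with Dashboards speaking TLS to whatever non-TLS service answers on 9200. A likely one-line fix to try in opensearch-dashboards.yml:
opensearch.hosts: [https://localhost:9201]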
THANK YOU!!!
3 posts - 2 participants
At the moment I am using a dynamic template for my indices. In reality, the only thing I specify in it is the primary/replica shard count; everything else is dynamically generated. At the moment, fields that are written as long have commas in them. Is there a way to get rid of that? Do I need to convert them to integer or double? My mapping may change over time as I write different events to the index, but would it be possible to explicitly define the field types in the mapping template for the fields that I know, and still allow other fields to be generated dynamically?
For example for this snippet in the mapping
"inode" : {
+ "type" : "long"
+ },
+
Can I define mapping for this node as integer or double?
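Yes: an index template can declare explicit types for the fields you know while leaving dynamic mapping on for everything else (the commas you see on long values are usually just the Dashboards number display format rather than anything stored in the document). A minimal sketch, with template and pattern names as placeholders:
PUT _index_template/my-events
{
  "index_patterns": ["my-events-*"],
  "template": {
    "settings": { "number_of_shards": 1, "number_of_replicas": 1 },
    "mappings": {
      "dynamic": true,
      "properties": {
        "inode": { "type": "integer" }
      }
    }
  }
}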
2 posts - 2 participants
Describe the issue:
Hi all,
I have a 3-node Wazuh indexer cluster based on OpenSearch 2.6.0 and want to set up a snapshot management policy to take snapshots every 5 hours to an external NFS share, for example.
Snapshot management is not working and I see the following repeating log entry in /var/log/wazuh-indexer/wazuh-cluster.log:
[WARN ][o.o.i.s.SMRunner ] [node-1] Cannot acquire lock for snapshot management job test_policy
On the screenshot the policy has a different name, but that doesn't matter.
I'll add that if I launch a snapshot manually it is created, so there are no issues with NFS read/write access.
I also tested a snapshot management policy on another Wazuh instance dedicated to tests, with only one wazuh-indexer based on OpenSearch 2.6.0, and it works with the same NFS storage.
Configuration:
+Steps to configure snapshot management policy:
Relevant Logs or Screenshots:
+[WARN ][o.o.i.s.SMRunner ] [node-1] Cannot acquire lock for snapshot management job test_policy
1 post - 1 participant
I recently upgraded the OpenSearch version to 2.9 and noticed this error while trying to create dashboards from my visualization:
"aws.auth.client.error.ARCInvalidSignatureException: Invalid signature, does not match"
Every other feature after the migration works just fine.
We use AWS Cognito and I have revisited all the roles. I had done the same setup (Cognito included) with OS version 2.7 and it worked perfectly fine.
In addition to this, my OpenSearch console just stops working after this error and requires me to log in again.
Even trying to load the OpenSearch-provided sample dashboard ([Flights] Global Flight Dashboard) throws an error.
3 posts - 2 participants
Describe the issue:
+I use OpenSearch along with Logstash and Filebeat to ingest and analyze logs and extract metrics.
The index names are created in Logstash; here are some examples:
I have three ISM policies which delete the indices:
What I want to achieve is to add, via the API, a rollup job that takes the weekly indices, aggregates the data into a new index, and has that index deleted after a year. Ideally I would like to use the current retention policies.
For instance, all indices component1_r_m-2023.30 … component1_r_m-2023.39 would be aggregated to rollup-component1_r_y-2023.3, and component1_r_m-2023.40 … component1_r_m-2023.49 would be aggregated to rollup-component1_r_y-2023.4, and so on.
In a nutshell, I want to achieve what index rollup is intended to do: aggregate data and keep it for longer in a smaller number of indices or shards.
Configuration:
First I tried Rollup Jobs. There are two issues with that approach, though.
For the reference there were 2 rollup jobs created:
Job1
"source_index": "component1_r_m-2023.3*",
"target_index": "rollup-component1_r_y-2023.3"
Job2
"source_index": "component1_r_m-2023.4*",
"target_index": "rollup-component1_r_y-2023.4"
Job1 was enabled, but Job2 failed due to unavailability of its source_index and was disabled.
So this method will not work.
So I'm now trying the ISM rollover with a rollup action.
I’ve added the alias and ism rollover alias to the index template.
PUT _index_template/component1
{
  "index_patterns": [
    "component1*"
  ],
  "priority": 5,
  "template": {
    "aliases": {
      "xxyyzz": {
        "is_write_index": true
      }
    },
    "settings": {
      "number_of_shards": "1",
      "refresh_interval": "30s",
      "plugins.index_state_management.rollover_alias": "xxyyzz"
    },
    "mappings": {
      "dynamic": false,
When checking the details of the index I see that the alias, write index and rollover_alias are there
{
  "component1_r_w-2023.39-0001": {
    "aliases": {
      "xxyyzz": {
        "is_write_index": true
      }
    },
    ...
    "settings": {
      "index": {
        "refresh_interval": "30s",
        "number_of_shards": "1",
        "plugins": {
          "index_state_management": {
            "rollover_alias": "xxyyzz"
          }
        },
The ISM policy is as prescribed in the OpenSearch documentation:
PUT _plugins/_ism/policies/rollover_policy_component1
{
  "policy": {
    "description": "Example rollover policy component1.",
    "default_state": "rollover",
    "states": [
      {
        "name": "rollover",
        "actions": [
          {
            "rollover": {
              "min_doc_count": 1
            }
          }
        ],
        "transitions": [
          {
            "state_name": "rp"
          }
        ]
      },
      {
        "name": "rp",
        "actions": [
          {
            "rollup": {
              "ism_rollup": {
                "target_index": "rollup_ndx-{{ctx.source_index}}",
                "description": "Example rollup job",
                "page_size": 5,
                "dimensions": [
                  {
                    "date_histogram": {
                      "source_field": "ts",
                      "fixed_interval": "5m",
                      "timezone": "UTC"
                    ...
                ],
                "metrics": []
              }
            }
          }
        ],
        "transitions": []
      }
    ],
    "ism_template": {
      "index_patterns": ["component1*"],
      "priority": 80
    }
  }
}
This is the latest error I'm facing. The rollover alias is expected to point to multiple indices, so I'm puzzled about how to move on and whether I will be able to fulfill my requirements.
{
  "cause": "Rollover alias [xxyyzz] can point to multiple indices, found duplicated alias [[xxyyzz]] in index template [component1]",
  "message": "Failed to rollover index [index=component1_r_w-2023.39-0001]"
}
It would be much easier and more intuitive if the target_index could be created dynamically but without a dependency on the source_index. This could be done with a solution similar to the one in Logstash.
If I could create a rollup job with
"source_index": "component1_r_m-*",
"target_index": "rollup-component1_r_y-%{+YYYY.MM}"
then data in the source indices would be aggregated into monthly indices which would be dropped after a year.
Relevant Logs or Screenshots:
2 posts - 1 participant
OpenSearchClientExample.java, when run with the dependencies below, throws java.lang.invoke.LambdaConversionException.
+
+
+
Configuration:
<dependency>
  <groupId>org.opensearch.client</groupId>
  <artifactId>opensearch-rest-client</artifactId>
  <version>1.2.4</version>
</dependency>
<dependency>
  <groupId>org.opensearch.client</groupId>
  <artifactId>opensearch-java</artifactId>
  <version>0.1.0</version>
</dependency>
Relevant Logs or Screenshots:
Exception in thread “main” java.lang.BootstrapMethodError: bootstrap method initialization exception
+at java.base/java.lang.invoke.BootstrapMethodInvoker.invoke(BootstrapMethodInvoker.java:194)
+at java.base/java.lang.invoke.CallSite.makeSite(CallSite.java:307)
+at java.base/java.lang.invoke.MethodHandleNatives.linkCallSiteImpl(MethodHandleNatives.java:258)
+at java.base/java.lang.invoke.MethodHandleNatives.linkCallSite(MethodHandleNatives.java:248)
+at org.opensearch.client.RestClient.convertResponse(RestClient.java:330)
+at org.opensearch.client.RestClient.performRequest(RestClient.java:314)
+at org.opensearch.client.RestClient.performRequest(RestClient.java:289)
+at org.opensearch.client.base.RestClientTransport.performRequest(RestClientTransport.java:77)
+at org.opensearch.client.opensearch.OpenSearchClient.index(OpenSearchClient.java:641)
+at com.dmat.lambda.OpenSearchClientExample.main(OpenSearchClientExample.java:60)
+Caused by: java.lang.invoke.LambdaConversionException: Invalid receiver type interface org.apache.http.Header; not a subtype of implementation type interface org.apache.http.NameValuePair
+at java.base/java.lang.invoke.AbstractValidatingLambdaMetafactory.validateMetafactoryArgs(AbstractValidatingLambdaMetafactory.java:254)
+at java.base/java.lang.invoke.LambdaMetafactory.metafactory(LambdaMetafactory.java:328)
+at java.base/java.lang.invoke.BootstrapMethodInvoker.invoke(BootstrapMethodInvoker.java:127)
+… 9 more
1 post - 1 participant
Latest
Describe the issue:
I am trying to search a set of words and also filter the results using the filter and term options; however, the query does not return anything. If I remove the filter and term options, it returns results.
I need help understanding why this happens.
Here is the query I am using:
search_query = {
  "query": {
    "bool": {
      "must": {
        "simple_query_string": {
          "query": "Heart",
          "fields": ["section", "subsection", "question", "answer"],
          "default_operator": "OR"
        }
      },
      "filter": [
        {
          "term": {"section": "Medicine"}
        }
      ]
    }
  }
}
+
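If section is mapped as text (the default for dynamically mapped strings), its values are analyzed and lowercased, so a term filter on the exact string "Medicine" will never match. Filtering on the keyword sub-field (assuming the default dynamic mapping created one) or using a match filter usually resolves this:
"filter": [
  { "term": { "section.keyword": "Medicine" } }
]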
Configuration:
Relevant Logs or Screenshots:
1 post - 1 participant
Describe the issue:
I have an OpenSearch index which keeps indexing data for the fields requestorName, accountName, endpoint, and health every 15 minutes.
Example: 1) RSSRequestor, account1, http:///…1.rss, UP.
2) RSSRequestor, account2, http:///…2.rss, DOWN.
3) DataRequestor, account1, http:///…any.com, DOWN.
I want to generate alerts that send an individual email notification when the health of a requestor is DOWN.
For the above example it should send 2 emails: 1) RSSRequestor, account2, having endpoint http:///…2.rss, is DOWN.
2) DataRequestor, account1, having endpoint http:///…any.com, is DOWN.
One requestor can have several accounts and each account can have multiple endpoints. I want to generate an individual email for each requestor covering all of its endpoints that are down. I also want to send an alert when an endpoint is back UP. Can you provide a full example of how to use OpenSearch alerting for this scenario?
I tried using a per query monitor, but it sends only a single alert.
Questions
Which monitor should I use? Can you give guidance on how to send multiple email notifications, one for each requestor that is down?
I also want to send an email after the requestor is back UP.
I tried with the query monitor below:
"query": {
+ "bool": {
+ "must": [
+ {
+ "term": {
+ "health.keyword": {
+ "value": "DOWN",
+ "boost": 1
+ }
+ }
+ }
+ ],
+ "adjust_pure_negative": true,
+ "boost": 1
+ }
+ }
+
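A per query monitor produces one result per execution, which is why only a single alert fires. For one notification per requestor/account, a bucket-level monitor that groups the DOWN documents is usually the better fit; a sketch of the kind of grouped query such a monitor could run (the .keyword sub-fields are assumptions about your mapping):
{
  "size": 0,
  "query": { "term": { "health.keyword": "DOWN" } },
  "aggs": {
    "by_requestor_account": {
      "composite": {
        "size": 100,
        "sources": [
          { "requestor": { "terms": { "field": "requestorName.keyword" } } },
          { "account": { "terms": { "field": "accountName.keyword" } } }
        ]
      }
    }
  }
}
Whether recovery ("back UP") notifications can be tied to the same monitor depends on the alerting version, so that part may need a second monitor.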
3 posts - 2 participants
anonymous_authentication, but couldn't find how to configure roles/users for the same?
3 posts - 2 participants
Is there a way to disable the color desaturation when auto refreshing a dashboard?
The feature looks to have been removed in Kibana; see the linked Kibana change.
2 posts - 2 participants
Describe the issue:
The issue here is that we were upgrading the cluster and unfortunately 75% of the nodes went down.
We managed to complete the remaining upgrade, but afterwards the cluster is RED due to two indices not being found (.opendistro-anomaly-detectors and .opendistro-reports-instances).
I tried to delete those indices with admin credentials, but I am getting the response below.
{
+ "error": {
+ "root_cause": [
+ {
+ "type": "security_exception",
+ "reason": "no permissions for [] and User [name=elastic, backend_roles=[], requestedTenant=null]"
+ }
+ ],
+ "type": "security_exception",
+ "reason": "no permissions for [] and User [name=elastic, backend_roles=[], requestedTenant=null]"
+ },
+ "status": 403
+}
+
The user elastic is the admin user, and I have run out of ideas for deleting these indices.
I tried via the command line and via the UI.
Can anyone help?
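Since .opendistro-* are protected system indices, even an all_access user can be rejected; such deletions are normally done with the security plugin's TLS admin certificate (the same one securityadmin.sh uses). A sketch, with certificate paths as placeholders:
curl -k --cert ./admin.pem --key ./admin-key.pem -X DELETE "https://localhost:9200/.opendistro-anomaly-detectors,.opendistro-reports-instances"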
2 posts - 2 participants
Describe the issue:
Please provide working Java code that can fetch all indices of a cluster using the Java REST client, version 2.1.
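A minimal sketch with the opensearch-java 2.x client, assuming `client` is already constructed (method names may differ slightly between client versions):
import org.opensearch.client.opensearch.OpenSearchClient;
import org.opensearch.client.opensearch.cat.IndicesResponse;

// List every index reported by the cat indices API.
IndicesResponse indices = client.cat().indices();
indices.valueBody().forEach(record -> System.out.println(record.index()));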
Configuration:
Relevant Logs or Screenshots:
2 posts - 2 participants
2.0.8, 2.10.0
Describe the issue:
+Hi,
+the Druid lib version included (1.0.15) is pretty old.
Versions of alibaba-druid 1.0.0 - 1.1.19 are reported as vulnerable: https://www.compass-security.com/fileadmin/Datein/Research/Advisories/CSNC-2019-022_alibaba-druid.txt
Is there a plan to update it?
Configuration:
Relevant Logs or Screenshots:
1 post - 1 participant
Describe the issue:
For any new developer, it's difficult to understand the guidelines for contribution.
=> I don't see any guidelines about which development branch to start from.
=> Every time, yarn start:docker takes more than 20-25 minutes to come up. Is there any specific system configuration recommended for running this project?
Configuration:
Relevant Logs or Screenshots:
1 post - 1 participant
Describe the issue:
+Newbie question, sorry.
All documentation I’ve seen for posting a document involves nesting the terms in a properties
object:
POST locations-b/_doc/
+{
+ "properties": {
+ "zipcode" : "02134"
+ }
+}
+
The document indexes successfully, and a match_all
query shows the doc. But if I go to search for it, this turns up 0 hits:
GET locations-b/_search
+{
+ "query": {
+ "match": {"zipcode": "02134"}
+ }
+}
+
In order to get hits, I need to specify the properties “container” in the query. This does work:
GET locations-b/_search
+{
+ "query": {
+ "match": {"properties.zipcode": "02134"}
+ }
+}
+
But none of the search examples in the documentation show that the properties. prefix needs to be specified in the query.
Am I doing something wrong? Or is this a quirk of the AWS implementation? Is there a way to set up a mask or something so that it's always implied? Thanks for any suggestions.
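Nothing AWS-specific is happening here: "properties" in the posted document is just an ordinary field name, so the values end up stored under properties.zipcode. Mappings use a properties key, but documents themselves should not wrap fields in one. Indexing and searching without the wrapper behaves as expected:
POST locations-b/_doc/
{
  "zipcode": "02134"
}
GET locations-b/_search
{
  "query": {
    "match": { "zipcode": "02134" }
  }
}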
Relevant Logs or Screenshots:
3 posts - 2 participants
Describe the issue:
+Hello All,
We recently migrated our search cluster from ES (
"version" : {
  "number" : "6.8.16",
  "build_flavor" : "default",
  "build_type" : "deb",
  "build_hash" : "1f62092",
  "build_date" : "2021-05-21T19:27:57.985321Z",
  "build_snapshot" : false,
  "lucene_version" : "7.7.3",
  "minimum_wire_compatibility_version" : "5.6.0",
  "minimum_index_compatibility_version" : "5.0.0"
},
to OpenSearch. The hardware is the same for both, but we are seeing high CPU spikes in the OpenSearch cluster. We're not sure what the next steps are; any help would be appreciated.
Configuration:
Number of shards: 24 on both OS and ES (replica count is 1 per primary shard).
Relevant Logs or Screenshots:
4 posts - 2 participants
{
+ "id": "hot_to_cold_policy",
+ "seqNo": 27868,
+ "primaryTerm": 11,
+ "policy": {
+ "policy_id": "hot_to_cold_policy",
+ "description": "Hot to cold transition, snapshot, and delete local index",
+ "last_updated_time": 1696262321299,
+ "schema_version": 18,
+ "error_notification": null,
+ "default_state": "hot",
+ "states": [
+ {
+ "name": "hot",
+ "actions": [],
+ "transitions": [
+ {
+ "state_name": "cold",
+ "conditions": {
+ "min_index_age": "1d"
+ }
+ }
+ ]
+ },
+ {
+ "name": "cold",
+ "actions": [
+ {
+ "retry": {
+ "count": 3,
+ "backoff": "exponential",
+ "delay": "1m"
+ },
+ "snapshot": {
+ "repository": "/opensearch_coldstorage/",
+ "snapshot": "{{ctx.index}}_snapshot"
+ }
+ },
+ {
+ "retry": {
+ "count": 3,
+ "backoff": "exponential",
+ "delay": "1m"
+ },
+ "delete": {}
+ }
+ ],
+ "transitions": []
+ }
+ ],
+ "ism_template": [
+ {
+ "index_patterns": [
+ "*"
+ ],
+ "priority": 100,
+ "last_updated_time": 1696262258189
+ }
+ ]
+ }
+}
+
My repository is a mounted S3 bucket which has full read/write access for the opensearch user, but I'm receiving the following error:
{
+ "cause": "[/opensearch_coldstorage] missing",
+ "message": "Failed to create snapshot [index=xxxx-2023.09.28]"
+}
+
I can manually issue the PUT /_snapshot/ColdStorage/1 command without any issues. I'm not sure why the automatic task is failing.
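The "[/opensearch_coldstorage] missing" cause suggests the policy's repository field holds a filesystem path rather than the name of a registered snapshot repository. Since the manual call against /_snapshot/ColdStorage works, pointing the ISM snapshot action at that registered repository name (assuming "ColdStorage" is indeed the registered name) may be the fix:
"snapshot": {
  "repository": "ColdStorage",
  "snapshot": "{{ctx.index}}_snapshot"
}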
3 posts - 2 participants