diff --git a/pkg/render/elasticsearch.go b/pkg/render/elasticsearch.go index 547d579a84..0c5fee908f 100644 --- a/pkg/render/elasticsearch.go +++ b/pkg/render/elasticsearch.go @@ -160,33 +160,25 @@ func (es elasticsearchComponent) pvcTemplate() corev1.PersistentVolumeClaim { // Generate the pod template required for the ElasticSearch nodes (controls the ElasticSearch container) func (es elasticsearchComponent) podTemplate() corev1.PodTemplateSpec { - // Setup default configuration for ES container + // Setup default configuration for ES container. For more information on managing resources, see: + // https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-managing-compute-resources.html and + // https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-jvm-heap-size.html#k8s-jvm-heap-size + esContainer := corev1.Container{ Name: "elasticsearch", - // Important note: Following Elastic ECK docs, the recommended practice is to set - // request and limit for memory to the same value: - // https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-managing-compute-resources.html#k8s-compute-resources-elasticsearch - // - // Default values for memory request and limit taken from ECK docs: - // https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-managing-compute-resources.html#k8s-default-behavior Resources: corev1.ResourceRequirements{ Limits: corev1.ResourceList{ "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2Gi"), + "memory": resource.MustParse("4Gi"), }, Requests: corev1.ResourceList{ - "cpu": resource.MustParse("1"), - "memory": resource.MustParse("2Gi"), + "cpu": resource.MustParse("250m"), + "memory": resource.MustParse("4Gi"), }, }, Env: []corev1.EnvVar{ - // Important note: Following Elastic ECK docs, the recommendation is to set - // the Java heap size to half the size of RAM allocated to the Pod: - // https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-managing-compute-resources.html#k8s-compute-resources-elasticsearch - // - // Default 
values for Java Heap min and max taken from ECK docs: - // https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-jvm-heap-size.html#k8s-jvm-heap-size - {Name: "ES_JAVA_OPTS", Value: "-Xms1G -Xmx1G"}, + // Set to 30% of the default memory, such that resources can be divided over ES, Lucene and ML. + {Name: "ES_JAVA_OPTS", Value: "-Xms1398101K -Xmx1398101K"}, }, } diff --git a/pkg/render/elasticsearch_test.go b/pkg/render/elasticsearch_test.go index 65b439d6d7..3dd4f2ea95 100644 --- a/pkg/render/elasticsearch_test.go +++ b/pkg/render/elasticsearch_test.go @@ -1,6 +1,7 @@ package render_test import ( + esalpha1 "github.com/elastic/cloud-on-k8s/pkg/apis/elasticsearch/v1alpha1" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" operator "github.com/tigera/operator/pkg/apis/operator/v1" @@ -171,6 +172,21 @@ var _ = Describe("Elasticsearch rendering tests", func() { {"tigera-secure", "tigera-kibana", "", "", ""}, } + resultES := resources[9].(*esalpha1.Elasticsearch).Spec.Nodes[0] + // There are no node selectors in the LogStorage CR, so we expect no node selectors in the Elasticsearch CR. + Expect(resultES.PodTemplate.Spec.NodeSelector).To(BeEmpty()) + + // Verify that the default container limits/requests are set. + esContainer := resultES.PodTemplate.Spec.Containers[0] + reqLimits := esContainer.Resources.Limits + reqResources := esContainer.Resources.Requests + + Expect(reqLimits.Cpu().String()).To(Equal("1")) + Expect(reqLimits.Memory().String()).To(Equal("4Gi")) + Expect(reqResources.Cpu().String()).To(Equal("250m")) + Expect(reqResources.Memory().String()).To(Equal("4Gi")) + Expect(esContainer.Env[0].Value).To(Equal("-Xms1398101K -Xmx1398101K")) + for i, expectedRes := range expectedResources { ExpectResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) }