diff --git a/charts/ssd/config/otel/otel-gateway-config.yaml b/charts/ssd/config/otel/otel-gateway-config.yaml
new file mode 100644
index 0000000..f7935ca
--- /dev/null
+++ b/charts/ssd/config/otel/otel-gateway-config.yaml
@@ -0,0 +1,97 @@
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ endpoint: 0.0.0.0:4317
+ http:
+ endpoint: 0.0.0.0:4318
+processors:
+ batch/raw:
+ batch/aggregate:
+ metricstransform/rename:
+ transforms:
+ - include: "^ssd\\.(.*)$"
+ match_type: regexp
+ action: insert
+ new_name: ssd.aggregate.$${1}
+ metricstransform/aggregate:
+ transforms:
+ - include: "^ssd\\..*$"
+ match_type: regexp
+ action: update
+ operations:
+ - action: aggregate_labels
+ label_set: ["ssd.workflow.type","k8s.deployment.name","k8s.namespace.name"]
+ aggregation_type: sum
+ resource/remove_label:
+ attributes:
+ - key: "k8s.pod.ip"
+ action: delete
+ - key: "k8s.pod.name"
+ action: delete
+ - key: "pipeline"
+ action: insert
+ value: "aggregator"
+ transform/remove_label:
+ metric_statements:
+ - context: datapoint
+ statements:
+ - 'delete_key(attributes, "k8s.pod.ip")'
+ - 'delete_key(attributes, "k8s.pod.name")'
+ groupbyattrs/aggregate:
+ keys:
+ - "k8s.deployment.name"
+ - "k8s.namespace.name"
+ filter/aggregate:
+ metrics:
+ include:
+ match_type: regexp
+ metric_names:
+ - "^ssd\\.aggregate\\..*"
+ k8sattributes/raw:
+ auth_type: serviceAccount # Allows access to the Kubernetes API
+ passthrough: false
+ extract:
+ metadata:
+ - k8s.deployment.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ pod_association:
+ - sources:
+ - from: connection
+ k8sattributes/aggregate:
+ auth_type: serviceAccount # Allows access to the Kubernetes API
+ passthrough: false
+ extract:
+ metadata:
+ - k8s.deployment.name
+ - k8s.namespace.name
+ - k8s.pod.name
+ pod_association:
+ - sources:
+ - from: connection
+
+exporters:
+ debug:
+ verbosity: detailed
+ prometheus:
+ endpoint: "0.0.0.0:9464"
+ enable_open_metrics: true
+ metric_expiration: 5m
+ resource_to_telemetry_conversion:
+ enabled: true
+
+service:
+ pipelines:
+ traces:
+ receivers: [otlp]
+ processors: [batch/raw]
+ exporters: [debug]
+ metrics/raw:
+ receivers: [otlp]
+ processors: [k8sattributes/raw,batch/raw]
+ exporters: [debug, prometheus]
+ metrics/aggregate:
+ receivers: [otlp]
+ processors: [k8sattributes/aggregate,metricstransform/aggregate,metricstransform/rename,resource/remove_label,filter/aggregate,batch/aggregate]
+ exporters: [debug, prometheus]
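Both collector configs in this change are shipped verbatim in ConfigMaps; the image and tuning they run with come from the new `otel` block added to values.yaml later in this diff. A minimal values override might look like the sketch below — the pinned tag and resource sizes are illustrative assumptions, only the keys are taken from this chart.

```yaml
# values override sketch for the new otel block; tag and sizes are examples, not chart defaults
otel:
  image:
    repository: otel/opentelemetry-collector-contrib
    tag: "0.96.0"        # pin a release instead of "latest" (hypothetical tag)
  interval: 10           # otelInterval consumed by ssd-opa, tool-chain and snyk-monitor
  LongInterval: 21600
  resources:
    requests:
      cpu: 100m
      memory: 256Mi
```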
diff --git a/charts/ssd/config/otel/otel-sidecar-config.yaml b/charts/ssd/config/otel/otel-sidecar-config.yaml
new file mode 100644
index 0000000..4179553
--- /dev/null
+++ b/charts/ssd/config/otel/otel-sidecar-config.yaml
@@ -0,0 +1,35 @@
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ http:
+
+processors:
+ batch:
+
+exporters:
+ otlp:
+ endpoint: "http://otel-gateway:4317"
+ tls:
+ insecure: true
+
+ debug:
+ verbosity: detailed
+
+ prometheus:
+ endpoint: "0.0.0.0:9464"
+ enable_open_metrics: true
+ metric_expiration: 5m
+ resource_to_telemetry_conversion:
+ enabled: true
+
+service:
+ pipelines:
+ traces:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp]
+ metrics:
+ receivers: [otlp]
+ processors: [batch]
+ exporters: [otlp,debug,prometheus]
diff --git a/charts/ssd/config/snyk-monitor/config.yaml b/charts/ssd/config/snyk-monitor/config.yaml
new file mode 100644
index 0000000..9a2aa33
--- /dev/null
+++ b/charts/ssd/config/snyk-monitor/config.yaml
@@ -0,0 +1,19 @@
+run:
+ sync-interval: {{ .Values.snykmonitor.interval }}
+logger:
+ level: {{ .Values.snykmonitor.loglevel | quote }} # options: info, debug
+dgraph:
+ {{- if .Values.dgraph.HA.enabled }}
+ host: http://dgraph-alpha-public
+ {{- else }}
+ host: http://dgraph-public
+ {{- end }}
+ port: 8080
+tool-chain:
+ host: http://tool-chain
+ port: 8100
+ssd-opa:
+ host: http://ssd-opa
+ port: 8200
+otelAddr: localhost:4317
+otelInterval: {{ .Values.otel.interval }}
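This snyk-monitor config is rendered from the `snykmonitor` and `otel` values introduced later in this diff; a sketch of the relevant overrides is below (the chosen values are examples, not requirements).

```yaml
# values sketch; the keys exist in values.yaml in this chart, the values shown are examples
snykmonitor:
  interval: 24h        # becomes run.sync-interval above
  loglevel: debug      # becomes logger.level (options: info, debug)
otel:
  interval: 10         # becomes otelInterval above
```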
diff --git a/charts/ssd/config/ssd-gate/services-config.yaml b/charts/ssd/config/ssd-gate/services-config.yaml
index ddb025b..047e504 100644
--- a/charts/ssd/config/ssd-gate/services-config.yaml
+++ b/charts/ssd/config/ssd-gate/services-config.yaml
@@ -96,7 +96,11 @@
streaming: false
strip_path: "/tool-chain"
- name: dgraph
+ {{- if .Values.dgraph.HA.enabled }}
+ baseUrl: dgraph-alpha-public:8080
+ {{- else }}
baseUrl: dgraph-public:8080
+ {{- end }}
health_endpoint: /
auth_required: true
path_prefixes:
@@ -112,6 +116,12 @@
- "/minio"
baseUrl: {{ .Release.Name }}-minio:9000
health_endpoint: ""
+- name: prometheus
+ baseUrl: {{ .Values.prometheusUrl }} # this depends on the location of the Prometheus server
+ health_endpoint: ""
+ auth_required: false
+ path_prefixes:
+ - "/api/v1/query_range"
- name: ssd-temporal-web
baseUrl: {{ .Release.Name }}-temporal-web:8080
health_endpoint: ""
@@ -125,3 +135,9 @@
baseUrl: ssd-dex:5558
health_endpoint: /healthz/ready
{{- end }}
+- name: metrics
+ baseUrl: otel-gateway:9464
+ health_endpoint: ""
+ auth_required: false
+ path_prefixes:
+ - "/metrics"
diff --git a/charts/ssd/config/ssd-opa/ssd-opa.yaml b/charts/ssd/config/ssd-opa/ssd-opa.yaml
index ce77068..2369f89 100644
--- a/charts/ssd/config/ssd-opa/ssd-opa.yaml
+++ b/charts/ssd/config/ssd-opa/ssd-opa.yaml
@@ -1,5 +1,11 @@
httpListenPort: 8200
+{{- if .Values.dgraph.HA.enabled }}
+graphQLAddr: http://dgraph-alpha-public:8080/graphql
+dgraphQLAddr: http://dgraph-alpha-public:8080/query
+{{- else }}
graphQLAddr: http://dgraph-public:8080/graphql
+dgraphQLAddr: http://dgraph-public:8080/query
+{{- end }}
ssdUrl: {{.Values.global.ssdUI.protocol}}://{{.Values.global.ssdUI.host}}
logLevel: {{ .Values.ssdopa.loglevel }}
reScheduler: false
@@ -7,3 +13,6 @@ redis:
address : {{ .Release.Name }}-redis-master:{{ .Values.redis.port }}
password: {{ .Values.redis.password }}
db: 0
+otelAddr: localhost:4317
+otelInterval: {{ .Values.otel.interval }}
+otelLongInterval: {{ .Values.otel.LongInterval }}
diff --git a/charts/ssd/config/ssd-ui/app-config.json b/charts/ssd/config/ssd-ui/app-config.json
index 3ba0fdd..a8138db 100644
--- a/charts/ssd/config/ssd-ui/app-config.json
+++ b/charts/ssd/config/ssd-ui/app-config.json
@@ -6,5 +6,6 @@
"stageSecurityThresholdMinScore": 50,
"stageSecurityThresholdMaxScore": 70,
"dgraphToken": "",
- "dgraphEndPointUrl": "/graphql"
+ "dgraphEndPointUrl": "/graphql",
+ "prometheusEndPointUrl": "/api/v1/query_range"
}
diff --git a/charts/ssd/config/ssd-ui/help-text.json b/charts/ssd/config/ssd-ui/help-text.json
index c91d79c..9a4471c 100644
--- a/charts/ssd/config/ssd-ui/help-text.json
+++ b/charts/ssd/config/ssd-ui/help-text.json
@@ -717,6 +717,28 @@
}
}
},
+ "SYSDIG": {
+ "HEADER": "Sysdig",
+ "BODY": "",
+ "NAME": {
+ "TOOLTIP": "",
+ "VALIDATION_MESSAGE": {
+ "noSpecialCharacters": "Account Name cannot contain special characters other than -",
+ "cannotContainSpace": "Account Name cannot contain space",
+ "required": "Account Name cannot be empty",
+ "startingFromNumber": "Account Name cannot start with numbers",
+ "minlength": "Account Name should be more than 2 characters",
+ "invalidName": "Please choose another account name; User generated account can't be named `default`"
+ }
+ },
+ "URL": {
+ "TOOLTIP": "",
+ "VALIDATION_MESSAGE": {
+ "required": "URL cannot be empty",
+ "invalidValue": "URL is invalid"
+ }
+ }
+ },
"JFROG": {
"HEADER": "JFrog XRay Scanning",
"BODY": "
JFrog Artifactory is a universal artifact repository manager designed to store, manage, and distribute binaries and artifacts produced during the software development process, including compiled code, libraries, dependencies, Docker images, and more.
Usage in SSD
- JFrog Artifactory notifies SSD of each pipeline execution. SSD identifies the image for every build and connects to the Artifactory repository to pull the newly built image.
- SSD then runs security scans on the pulled images. The scan results are available on the Vulnerability Management page and the Artifact section of the DBOM page.
- JFrog Artifactory helps collect metadata such as Artifact SHA for artifact integrity checks, ensuring security in the supply chain. This information is populated in the DBOM page for audit purposes.
",
diff --git a/charts/ssd/config/ssd-ui/insights-config.json b/charts/ssd/config/ssd-ui/insights-config.json
new file mode 100644
index 0000000..645496b
--- /dev/null
+++ b/charts/ssd/config/ssd-ui/insights-config.json
@@ -0,0 +1,52 @@
+{
+ "insightsConfigData": [
+ {
+ "chartType": "line",
+ "metricName": "Scan wise - Active scans count",
+ "query": "ssd_aggregate_active_workflows{k8s_namespace_name=\"{{ .Release.Namespace}}\"}",
+ "plotAxisName": "ssd_workflow_type"
+ },
+ {
+ "chartType": "line",
+ "metricName": "Scan wise - Scans completed per second",
+ "query": "rate(ssd_aggregate_executed_workflows_total{k8s_namespace_name=\"{{ .Release.Namespace}}\"}[5m])",
+ "plotAxisName": "ssd_workflow_type"
+ },
+ {
+ "chartType": "line",
+ "metricName": "Scan wise - Scans failed per second",
+ "query": "rate(ssd_aggregate_failed_wrokflows_total{k8s_namespace_name=\"{{ .Release.Namespace}}\"}[5m])",
+ "plotAxisName": "ssd_workflow_type"
+ },
+ {
+ "chartType": "area",
+ "metricName": "Source Code Repositories Scanned - Active count",
+ "query": "ssd_aggregate_source_code_repo_scan_active{k8s_namespace_name=\"{{ .Release.Namespace}}\"}",
+ "plotAxisName": ""
+ },
+ {
+ "chartType": "line",
+ "metricName": "Source Code Repositories Scanned - Completed per second",
+ "query": "rate(ssd_aggregate_source_code_repo_scan_executed_total{k8s_namespace_name=\"{{ .Release.Namespace}}\"}[5m])",
+ "plotAxisName": ""
+ },
+ {
+ "chartType": "line",
+ "metricName": "Source Code Repositories Scanned - Failed per second",
+ "query": "rate(ssd_aggregate_source_code_repo_scan_failed_total{k8s_namespace_name=\"{{ .Release.Namespace}}\"}[5m])",
+ "plotAxisName": ""
+ },
+ {
+ "chartType": "line",
+ "metricName": "Number of new repositories discovered",
+ "query": "ssd_aggregate_new_repo_discovered{k8s_namespace_name=\"{{ .Release.Namespace}}\"}",
+ "plotAxisName": ""
+ },
+ {
+ "chartType": "area",
+ "metricName": "CI/Build Events Received per Second",
+ "query": "rate(ssd_aggregate_ci_events_triggered_total{k8s_namespace_name=\"{{ .Release.Namespace}}\"}[5m])",
+ "plotAxisName": ""
+ }
+ ]
+}
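Each query is templated with the release namespace before the UI sends it to `/api/v1/query_range` via ssd-gate. For a release installed in a hypothetical namespace `ssd`, the first entry would render roughly as:

```yaml
# rendered sketch for a hypothetical release namespace "ssd"
{
  "chartType": "line",
  "metricName": "Scan wise - Active scans count",
  "query": "ssd_aggregate_active_workflows{k8s_namespace_name=\"ssd\"}",
  "plotAxisName": "ssd_workflow_type"
}
```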
diff --git a/charts/ssd/config/ssd-ui/integrators-config.json b/charts/ssd/config/ssd-ui/integrators-config.json
index 5758122..7705877 100644
--- a/charts/ssd/config/ssd-ui/integrators-config.json
+++ b/charts/ssd/config/ssd-ui/integrators-config.json
@@ -595,6 +595,17 @@
"helpText": "Options",
"placeholderText": ""
},
+ "cimonitoring": {
+ "displayName": "CI Monitoring",
+ "dataType": "toggle",
+ "default": "inactive",
+ "featureConfig": true,
+ "requiredKey": "sastsnykscan",
+ "requiredValue": "Cloud Mode",
+ "required": false,
+ "helpText": "Options",
+ "placeholderText": "----------"
+ },
"token": {
"displayName": "Token",
"dataType": "input",
@@ -1832,6 +1843,82 @@
}
]
},
+ {
+ "integratorType": "sysdig",
+ "displayName": "Sysdig",
+ "category": "scanningtool",
+ "multiSupport": true,
+ "testConnectionFlag": true,
+ "deleteAccount": true,
+ "integratorConfigs": {
+ "name": {
+ "displayName": "Account Name",
+ "dataType": "input",
+ "required": true,
+ "helpText": "",
+ "placeholderText": "opsmx-accountname"
+ },
+ "url": {
+ "dataType": "input",
+ "displayName": "URL",
+ "helpText": "URL",
+ "regexpValue": "^(?:https?:\/\/|s?ftps?:\/\/)?(?!www | www\\.)[A-Za-z0-9_-]+\\.+[A-Za-z0-9.\/%&=\\?_:;-]+$|https?:\/\/(?:w{1,3}\\.)?[^\\s.]+(?:\\.[a-z]+)*(?::\\d+)?(?![^<]*(?:<\/\\w+>|\/?>))",
+ "placeholderText": "Example:https://site.com",
+ "required": true,
+ "encrypt": false
+ },
+ "token": {
+ "displayName": "Token",
+ "dataType": "input",
+ "required": true,
+ "encrypt": true,
+ "helpText": "Authentiation Type",
+ "placeholderText": "Example:knjnasjwokldjhse"
+ }
+ },
+ "gridConfigs": [
+ {
+ "name": "Account Name",
+ "prop": "name",
+ "type": "default",
+ "width": 160,
+ "sortable": false,
+ "defatultVisibility": true
+ },
+ {
+ "name": "Url",
+ "prop": "url",
+ "type": "default",
+ "width": 160,
+ "sortable": false,
+ "defatultVisibility": true
+ },
+ {
+ "name": "Team",
+ "prop": "team",
+ "type": "chipSet",
+ "width": 130,
+ "sortable": false,
+ "defatultVisibility": true
+ },
+ {
+ "name": "Environment",
+ "prop": "environments",
+ "type": "chipSet",
+ "width": 130,
+ "sortable": false,
+ "defatultVisibility": true
+ },
+ {
+ "name": "Status",
+ "prop": "status",
+ "type": "toggleSwitch",
+ "width": 80,
+ "sortable": false,
+ "defatultVisibility": true
+ }
+ ]
+ },
{
"integratorType": "syft",
"displayName": "Syft (CMD)",
diff --git a/charts/ssd/config/supplychain-api/app-config.yaml b/charts/ssd/config/supplychain-api/app-config.yaml
index 59e0669..8ec713e 100644
--- a/charts/ssd/config/supplychain-api/app-config.yaml
+++ b/charts/ssd/config/supplychain-api/app-config.yaml
@@ -8,7 +8,11 @@ opsMxSvc: {{.Values.global.ssdUI.protocol}}://{{.Values.global.ssdUI.host}}
ssdOpa: http://ssd-opa:8200
redis: {{ template "ssd.redisBaseURL" . }}
graphqlsvc:
+ {{- if .Values.dgraph.HA.enabled }}
+ host: http://dgraph-alpha-public
+ {{- else }}
host: http://dgraph-public
+ {{- end }}
port: 8080
postgresql:
ssd:
diff --git a/charts/ssd/config/supplychain-api/ssd-integrations.yaml b/charts/ssd/config/supplychain-api/ssd-integrations.yaml
index 0f48df2..0c7e71f 100644
--- a/charts/ssd/config/supplychain-api/ssd-integrations.yaml
+++ b/charts/ssd/config/supplychain-api/ssd-integrations.yaml
@@ -82,6 +82,8 @@ integrationData:
featureConfigs:
sastsnykscan:
default: Local Mode
+ cimonitoring:
+ default: inactive
helmscan:
default: inactive
- integratorType: semgrep
@@ -245,6 +247,14 @@ integrationData:
featureConfigs:
vulnerabilityscan:
default: inactive
+ - integratorType: sysdig
+ category: scanningtool
+ multiaccount: false
+ integratorConfigs:
+ url:
+ encrypt: false
+ token:
+ encrypt: true
- stage: Others
integrations:
- integratorType: chatgpt
diff --git a/charts/ssd/config/supplychain-preprocessor/argocdConfig.yml b/charts/ssd/config/supplychain-preprocessor/argocdConfig.yml
index 6e79f04..631427c 100644
--- a/charts/ssd/config/supplychain-preprocessor/argocdConfig.yml
+++ b/charts/ssd/config/supplychain-preprocessor/argocdConfig.yml
@@ -13,7 +13,11 @@ redis:
password: {{ .Values.redis.password }}
DB: 0
graphql:
+ {{- if .Values.dgraph.HA.enabled }}
+ host: http://dgraph-alpha-public
+ {{- else }}
host: http://dgraph-public
+ {{- end }}
port: 8080
supplychainapi:
diff --git a/charts/ssd/config/supplychain-preprocessor/bitbucketConfig.yml b/charts/ssd/config/supplychain-preprocessor/bitbucketConfig.yml
index 811acdf..d058ff2 100644
--- a/charts/ssd/config/supplychain-preprocessor/bitbucketConfig.yml
+++ b/charts/ssd/config/supplychain-preprocessor/bitbucketConfig.yml
@@ -9,7 +9,11 @@ rabbitmq:
prefetchCount: 50
graphql:
+ {{- if .Values.dgraph.HA.enabled }}
+ host: http://dgraph-alpha-public
+ {{- else }}
host: http://dgraph-public
+ {{- end }}
port: 8080
ssdopasvc:
diff --git a/charts/ssd/config/supplychain-preprocessor/githubactionsConfig.yml b/charts/ssd/config/supplychain-preprocessor/githubactionsConfig.yml
index 974150f..9953f40 100644
--- a/charts/ssd/config/supplychain-preprocessor/githubactionsConfig.yml
+++ b/charts/ssd/config/supplychain-preprocessor/githubactionsConfig.yml
@@ -9,7 +9,11 @@ rabbitmq:
prefetchCount: 50
graphql:
+ {{- if .Values.dgraph.HA.enabled }}
+ host: http://dgraph-alpha-public
+ {{- else }}
host: http://dgraph-public
+ {{- end }}
port: 8080
ssdopasvc:
diff --git a/charts/ssd/config/supplychain-preprocessor/jenkinsConfig.yml b/charts/ssd/config/supplychain-preprocessor/jenkinsConfig.yml
index d2cc5a2..003476c 100644
--- a/charts/ssd/config/supplychain-preprocessor/jenkinsConfig.yml
+++ b/charts/ssd/config/supplychain-preprocessor/jenkinsConfig.yml
@@ -9,7 +9,11 @@ rabbitmq:
prefetchCount: 50
graphql:
+ {{- if .Values.dgraph.HA.enabled }}
+ host: http://dgraph-alpha-public
+ {{- else }}
host: http://dgraph-public
+ {{- end }}
port: 8080
ssdopasvc:
diff --git a/charts/ssd/config/supplychain-preprocessor/spinnakerConfig.yml b/charts/ssd/config/supplychain-preprocessor/spinnakerConfig.yml
index bd8b75d..974a7ea 100644
--- a/charts/ssd/config/supplychain-preprocessor/spinnakerConfig.yml
+++ b/charts/ssd/config/supplychain-preprocessor/spinnakerConfig.yml
@@ -13,7 +13,11 @@ redis:
password: {{ .Values.redis.password }}
DB: 0
graphql:
+ {{- if .Values.dgraph.HA.enabled }}
+ host: http://dgraph-alpha-public
+ {{- else }}
host: http://dgraph-public
+ {{- end }}
port: 8080
supplychainapi:
host: http://supplychain-api
diff --git a/charts/ssd/config/token-machine/secret-config.yaml b/charts/ssd/config/token-machine/secret-config.yaml
index 52f76e4..cb2947f 100644
--- a/charts/ssd/config/token-machine/secret-config.yaml
+++ b/charts/ssd/config/token-machine/secret-config.yaml
@@ -31,3 +31,7 @@ managedSecrets:
serviceName: supplychain-preprocessor
authorizations:
- access-dgraph
+ - name: snyk-monitor-secret
+ serviceName: snyk-monitor
+ authorizations:
+ - access-dgraph
diff --git a/charts/ssd/config/toolchain/tool-chain.yaml b/charts/ssd/config/toolchain/tool-chain.yaml
index fb2eb96..455a8a5 100644
--- a/charts/ssd/config/toolchain/tool-chain.yaml
+++ b/charts/ssd/config/toolchain/tool-chain.yaml
@@ -1,7 +1,13 @@
httpListenPort: 8100
+{{- if .Values.dgraph.HA.enabled }}
+graphQLAddr: http://dgraph-alpha-public:8080/graphql
+{{- else }}
graphQLAddr: http://dgraph-public:8080/graphql
+{{- end }}
cloneRepoOnce: {{ .Values.toolchain.cloneRepoOnce }}
logLevel: {{ .Values.toolchain.logLevel }}
ossSvc: http://34.71.203.90:8070
mobsfClentAddr: http://mobsf-service:8000
ossAnalayze: 60
+otelAddr: localhost:4317
+otelInterval: {{ .Values.otel.interval }}
diff --git a/charts/ssd/ssd-minimal-values.yaml b/charts/ssd/ssd-minimal-values.yaml
index b3d66ab..e2777d3 100644
--- a/charts/ssd/ssd-minimal-values.yaml
+++ b/charts/ssd/ssd-minimal-values.yaml
@@ -30,7 +30,31 @@ organisationname: opsmx
### Admin groups in SSD for Rbac #############################################
admingroups: admin
-###############################################################################
+############ Update the Prometheus Ingress URL ###############################
+prometheusUrl: prometheus.ingress.url.com
+##############################################################################
+### Update the below parameters only if global.minio.enabled is false ########
+### Update the S3 Bucket REGION in AWS #######################################
+s3region: UPDATE.S3BUCKETREGION
+s3bucketurl: https://S3BUCKETNAME.s3.S3BUCKETREGION.amazonaws.com
+#######################################################################################################################
+#### Update the below parameters only if you want to migrate existing Dgraph data to the HA setup ####################
+#######################################################################################################################
+dgraph:
+ HA:
+ enabled: false
+ replicas: 3
+ migrateolddata: true
+ ### Update the S3 Bucket Name where the existing Dgraph Data is stored #########################################
+ s3bucketurl: s3:///S3BUCKETNAME/DGRAPHFOLDER
+ saAnnotations: {}
+
+###################################################################################################
+toolchain:
+ ### Update the annotations so the IAM role uses the Kubernetes service account to access S3 buckets ##################
+ saAnnotations: {}
+ # eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/
+################################################################################################
#####User doesn't need to change the below flags let it be the default one######################
storageClass:
ssdgate:
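Enabling the Dgraph HA path switches every service in this diff to `dgraph-alpha-public` and, when `migrateolddata` is true, bulk-loads the exported data from S3 on first start. A minimal override might look like the sketch below; the bucket path and role ARN are placeholders.

```yaml
# HA override sketch; bucket path and IAM role ARN are placeholders
dgraph:
  HA:
    enabled: true
    replicas: 3
    migrateolddata: true
    s3bucketurl: s3:///my-ssd-backups/dgraph   # folder containing g01.rdf.gz and g01.gql_schema.gz
    saAnnotations:
      eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/dgraph-s3-access   # hypothetical role
```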
diff --git a/charts/ssd/templates/_helpers.tpl b/charts/ssd/templates/_helpers.tpl
index c2d34ed..6001304 100644
--- a/charts/ssd/templates/_helpers.tpl
+++ b/charts/ssd/templates/_helpers.tpl
@@ -186,3 +186,49 @@ Redis base URL for Spinnaker
{{- printf "redis://%s:%s" .Values.redis.external.host (.Values.redis.external.port | toString) -}}
{{- end }}
{{- end }}
+
+
+{{/*
+Return the proper OTEL image name
+*/}}
+{{- define "otel.image" -}}
+{{- $registryName := .Values.imageCredentials.registry -}}
+{{- $repositoryName := .Values.otel.image.repository -}}
+{{- $tag := .Values.otel.image.tag | toString -}}
+{{- printf "%s:%s" $repositoryName $tag -}}
+{{- end -}}
+
+{{/*
+Return the proper Snyk Monitor image name
+*/}}
+{{- define "snykmonitor.image" -}}
+{{- $registryName := .Values.imageCredentials.registry -}}
+{{- $repositoryName := .Values.snykmonitor.image.repository -}}
+{{- $tag := .Values.snykmonitor.image.tag | toString -}}
+{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+
+{{/*
+Adding the New container to all Services
+*/}}
+{{- define "otel.sidecar.container" }}
+- name: otel-sidecar
+ image: {{ template "otel.image" . }}
+ args:
+ - '--config=/etc/otel/otel-sidecar-config.yaml'
+ resources: {}
+ volumeMounts:
+ - name: otel-sidecar-volume
+ mountPath: /etc/otel
+ {{- if .Values.otel.securityContext }}
+ securityContext:
+ {{ toYaml .Values.otel.securityContext | nindent 12 }}
+ {{- else }}
+ securityContext: {{ default "{}" }}
+ {{- end }}
+ {{- with .Values.otel.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+{{- end }}
+
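The `otel.sidecar.container` helper only emits the sidecar container; every deployment that includes it also needs the `otel-sidecar-volume` ConfigMap volume, as the ssd-opa and tool-chain deployments do later in this diff. A sketch of wiring it into another deployment template in this chart, with a hypothetical `my-service` container:

```yaml
# sidecar wiring sketch inside a deployment's pod template; "my-service" is hypothetical
    spec:
      containers:
{{ include "otel.sidecar.container" . | indent 8 }}
        - name: my-service
          image: my-service:latest
      volumes:
        - name: otel-sidecar-volume
          configMap:
            name: otel-sidecar-config
            defaultMode: 420
```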
diff --git a/charts/ssd/templates/automation/automation-job.yaml b/charts/ssd/templates/automation/automation-job.yaml
index ad66156..a4f39d5 100644
--- a/charts/ssd/templates/automation/automation-job.yaml
+++ b/charts/ssd/templates/automation/automation-job.yaml
@@ -8,55 +8,137 @@ spec:
containers:
- args:
- |-
- schema_endpoint=http://dgraph-public:8080/admin/schema
+ {{- if .Values.dgraph.HA.enabled }}
+ schema_endpoint=http://dgraph-alpha-public:8080/admin/schema
setup_endpoint=http://supplychain-api:8099/ssdservice/v1/setup
- echo "Checking the Pod Status of Dgraph,Supply Chain API and SSD DB"
+ echo "Checking the Pod Status of Dgraph,Supply Chain API"
wait_period=0
while true; do
kubectl get po -n {{ .Release.Namespace }} -o jsonpath='{range .items[*]}{..metadata.name}{"\t"}{..containerStatuses..ready}{"\n"}{end}' > live.status
- DGRAPH=$(grep dgraph-0 live.status | awk '{print $2}')
+ kubectl get pods -n {{ .Release.Namespace }} -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.containerStatuses[*].ready}{"\n"}{end}' | grep dgraph > dgraph.status
+
+ while read -r pod status; do
+ echo "$pod status: $status"
+ done < dgraph.status
+
+ DGRAPH=$(awk '{if ($2 != "true") exit 1}' dgraph.status && echo "true" || echo "false")
SUPPLYCHAINAPI=$(grep supplychain-api live.status | awk '{print $2}')
- status=$(kubectl get pods -l component=db --field-selector=status.phase=Running --no-headers=true | awk '{print $2}')
wait_period=$(($wait_period+10))
- if [ "$DGRAPH" == "true" ] && [ "$SUPPLYCHAINAPI" == "true" ] && [ "$status" == "1/1" ]; then
- sleep 10
- echo "############******Adding the Dgraph Schema*********#######################"
- SCHEMA_CHECK=`curl -X POST -H "Content-Type: application/json" --data-binary "$(cat /tmp/schema/schema.graphql)" $schema_endpoint`
- echo $SCHEMA_CHECK | grep -i Success
- if [ $? -eq 0 ]; then
- echo "##############***Successfully Updated the Dgraph Schema***########################"
- else
- echo "##############***Failed to Updated the Dgraph Schema***#########################"
- exit 1
- fi
- sleep 30
- echo "#################*****Executing the Setup API Schema****##########################"
- RESP=`curl -X POST -H "Content-Type: application/json" --data-binary "$(cat /tmp/setup.json)" $setup_endpoint`
- sleep 10
- kubectl -n {{ .Release.Namespace }} logs ssd-db-0 > logs.txt
- echo "Checking the Schema is updation"
- count=$(cat logs.txt | grep -i "UpdateSchemeTask done"| wc -l)
- if [ "$count" == "2" ]; then
- kubectl -n {{ .Release.Namespace }} delete pods -l app.kubernetes.io/name=temporal
+ if [ "$DGRAPH" == "true" ] && [ "$SUPPLYCHAINAPI" == "true" ]; then
+ sleep 10
+ echo "############******Adding the Dgraph Schema*********#######################"
+ SCHEMA_CHECK=`curl -X POST -H "Content-Type: application/json" --data-binary "$(cat /tmp/schema/schema.graphql)" $schema_endpoint`
+ echo $SCHEMA_CHECK | grep -i Success
+ if [ $? -eq 0 ]; then
+ echo "##############***Successfully Updated the Dgraph Schema***########################"
+ else
+ echo "##############***Failed to Updated the Dgraph Schema***#########################"
+ exit 1
+ fi
sleep 30
- frontendpod=$(kubectl get pods -l app.kubernetes.io/component=frontend --field-selector=status.phase=Running --no-headers=true | awk '{print $1}')
- kubectl -n {{ .Release.Namespace }} exec -it $frontendpod -- /bin/bash -c "tctl --ns default namespace register -rd 3"
+ echo "#################*****Executing the Setup API Schema****##########################"
+ RESP=`curl -X POST -H "Content-Type: application/json" --data-binary "$(cat /tmp/setup.json)" $setup_endpoint`
+ sleep 10
+ {{- if .Values.db.enabled }}
+ SSDDB=$(kubectl get pods -n {{ .Release.Namespace }} -l component=db --field-selector=status.phase=Running --no-headers=true | awk '{print $2}')
+ if [ "$SSDDB" == "1/1" ]; then
+ echo "Wating for the SSD-DB Pod to come up"
+ sleep 10
+ kubectl -n {{ .Release.Namespace }} logs ssd-db-0 > logs.txt
+ echo "Checking the Schema is updation"
+ count=$(cat logs.txt | grep -i "UpdateSchemeTask done"| wc -l)
+ if [ "$count" == "2" ]; then
+ kubectl -n {{ .Release.Namespace }} delete pods -l app.kubernetes.io/name=temporal
+ sleep 30
+ frontendpod=$(kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/component=frontend --field-selector=status.phase=Running --no-headers=true | awk '{print $1}')
+ kubectl -n {{ .Release.Namespace }} exec -it $frontendpod -- /bin/bash -c "tctl --ns default namespace register -rd 3"
+ kubectl -n {{ .Release.Namespace }} rollout restart deploy tool-chain
+ break
+ else
+ echo "###########**** Waiting for the SSD-DB Pod to come up ******###########"
+ exit 1
+ fi
+ fi
+ {{- else }}
+ kubectl -n {{ .Release.Namespace }} delete pods -l app.kubernetes.io/name=temporal
+ frontendpod=$(kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/component=frontend --field-selector=status.phase=Running --no-headers=true | awk '{print $1}')
+ kubectl -n {{ .Release.Namespace }} exec -it $frontendpod -- /bin/bash -c "tctl --ns default namespace register -rd 3"
kubectl -n {{ .Release.Namespace }} rollout restart deploy tool-chain
- break
+ {{- end }}
else
- echo "The Schema Updation if failed"
- sleep 10
+ if [ $wait_period -gt 1000 ]; then
+ echo "#############****Script is timed out as the Dgraph and Supply chain is not ready yet****########"
+ break
+ else
+ echo "############****Waiting for Dgraph,Supply chain and Token machine services to be ready*****#########"
+ sleep 10
+ fi
fi
- else
- if [ $wait_period -gt 1000 ]; then
- echo "#############****Script is timed out as the Dgraph and Supply chain is not ready yet****########"
- break
+ done
+ {{- else }}
+ schema_endpoint=http://dgraph-public:8080/admin/schema
+ setup_endpoint=http://supplychain-api:8099/ssdservice/v1/setup
+ echo "Checking the Pod Status of Dgraph,Supply Chain API"
+ wait_period=0
+ while true; do
+ kubectl get pods -n {{ .Release.Namespace }} -o jsonpath='{range .items[*]}{..metadata.name}{"\t"}{..containerStatuses..ready}{"\n"}{end}' > live.status
+ DGRAPH=$(grep dgraph-0 live.status | awk '{print $2}')
+ SUPPLYCHAINAPI=$(grep supplychain-api live.status | awk '{print $2}')
+ status=$(kubectl get pods -n {{ .Release.Namespace }} -l component=db --field-selector=status.phase=Running --no-headers=true | awk '{print $2}')
+ wait_period=$(($wait_period+10))
+ if [ "$DGRAPH" == "true" ] && [ "$SUPPLYCHAINAPI" == "true" ] && [ "$status" == "1/1" ]; then
+ sleep 10
+ echo "############******Adding the Dgraph Schema*********#######################"
+ SCHEMA_CHECK=`curl -X POST -H "Content-Type: application/json" --data-binary "$(cat /tmp/schema/schema.graphql)" $schema_endpoint`
+ echo $SCHEMA_CHECK | grep -i Success
+ if [ $? -eq 0 ]; then
+ echo "##############***Successfully Updated the Dgraph Schema***########################"
+ else
+ echo "##############***Failed to Updated the Dgraph Schema***#########################"
+ exit 1
+ fi
+ sleep 30
+ echo "#################*****Executing the Setup API Schema****##########################"
+ RESP=`curl -X POST -H "Content-Type: application/json" --data-binary "$(cat /tmp/setup.json)" $setup_endpoint`
+ sleep 10
+ {{- if .Values.db.enabled }}
+ SSDDB=$(kubectl get pods -n {{ .Release.Namespace }} -l component=db --field-selector=status.phase=Running --no-headers=true | awk '{print $2}')
+ if [ "$SSDDB" == "1/1" ]; then
+ echo "Wating for the SSD-DB Pod to come up"
+ sleep 10
+ kubectl -n {{ .Release.Namespace }} logs ssd-db-0 > logs.txt
+ echo "Checking the Schema is updation"
+ count=$(cat logs.txt | grep -i "UpdateSchemeTask done"| wc -l)
+ if [ "$count" == "2" ]; then
+ kubectl -n {{ .Release.Namespace }} delete pods -l app.kubernetes.io/name=temporal
+ sleep 30
+ frontendpod=$(kubectl get pods -l app.kubernetes.io/component=frontend --field-selector=status.phase=Running --no-headers=true | awk '{print $1}')
+ kubectl -n {{ .Release.Namespace }} exec -it $frontendpod -- /bin/bash -c "tctl --ns default namespace register -rd 3"
+ kubectl -n {{ .Release.Namespace }} rollout restart deploy tool-chain
+ break
+ else
+ echo "###########**** Waiting for the SSD-DB Pod to come up ******###########"
+ exit 1
+ fi
+ fi
+ {{- else }}
+ kubectl -n {{ .Release.Namespace }} delete pods -l app.kubernetes.io/name=temporal
+ frontendpod=$(kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/component=frontend --field-selector=status.phase=Running --no-headers=true | awk '{print $1}')
+ kubectl -n {{ .Release.Namespace }} exec -it $frontendpod -- /bin/bash -c "tctl --ns default namespace register -rd 3"
+ kubectl -n {{ .Release.Namespace }} rollout restart deploy tool-chain
+ {{- end }}
+
else
- echo "############****Waiting for Dgraph,Supply chain and Token machine services to be ready*****#########"
- sleep 10
- fi
- fi
+ if [ $wait_period -gt 1000 ]; then
+ echo "#############****Script is timed out as the Dgraph and Supply chain is not ready yet****########"
+ break
+ else
+ echo "############****Waiting for Dgraph,Supply chain and Token machine services to be ready*****#########"
+ sleep 10
+ fi
+ fi
done
+ {{- end }}
command:
- /bin/bash
- +x
diff --git a/charts/ssd/templates/dgraph/sa.yaml b/charts/ssd/templates/dgraph/sa.yaml
new file mode 100644
index 0000000..b726cb7
--- /dev/null
+++ b/charts/ssd/templates/dgraph/sa.yaml
@@ -0,0 +1,10 @@
+{{- if .Values.dgraph.HA.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: dgraph-sa
+{{- if .Values.dgraph.HA.saAnnotations }}
+ annotations:
+{{ toYaml .Values.dgraph.HA.saAnnotations | indent 4 }}
+{{- end }}
+{{- end }}
diff --git a/charts/ssd/templates/dgraph/sts.yaml b/charts/ssd/templates/dgraph/sts.yaml
index f209f0f..8f6c199 100644
--- a/charts/ssd/templates/dgraph/sts.yaml
+++ b/charts/ssd/templates/dgraph/sts.yaml
@@ -1,3 +1,295 @@
+{{- if .Values.dgraph.HA.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: dgraph-zero
+spec:
+ serviceName: "dgraph-zero"
+ replicas: {{ .Values.dgraph.HA.replicas }}
+ selector:
+ matchLabels:
+ app: dgraph-zero
+ template:
+ metadata:
+ labels:
+ app: dgraph-zero
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - dgraph-zero
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - name: zero
+ image: dgraph/dgraph:latest
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 5080
+ name: grpc-zero
+ - containerPort: 6080
+ name: http-zero
+ volumeMounts:
+ - name: datadir
+ mountPath: /dgraph
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ command:
+ - bash
+ - "-c"
+ - |
+ set -ex
+ [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
+ ordinal=${BASH_REMATCH[1]}
+ idx=$(($ordinal + 1))
+ if [[ $ordinal -eq 0 ]]; then
+ exec dgraph zero --my=$(hostname -f):5080 --raft="idx=$idx" --replicas {{ .Values.dgraph.HA.replicas }}
+ else
+ exec dgraph zero --my=$(hostname -f):5080 --peer dgraph-zero-0.dgraph-zero.${POD_NAMESPACE}.svc.cluster.local:5080 --raft="idx=$idx" --replicas {{ .Values.dgraph.HA.replicas }}
+ fi
+ {{- with .Values.dgraph.probes }}
+ {{- toYaml . | nindent 8 }}
+ {{- else }}
+ livenessProbe:
+ httpGet:
+ path: /health
+ port: 6080
+ initialDelaySeconds: 15
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ readinessProbe:
+ httpGet:
+ path: /health
+ port: 6080
+ initialDelaySeconds: 15
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ {{- end }}
+ {{- with .Values.dgraph.resources }}
+ resources:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: 60
+ volumes:
+ - name: datadir
+ persistentVolumeClaim:
+ claimName: datadir
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ updateStrategy:
+ type: RollingUpdate
+ volumeClaimTemplates:
+ - metadata:
+ name: datadir
+ spec:
+ {{- if .Values.storageClass }}
+ storageClassName: {{ .Values.storageClass }}
+ {{- end }}
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: {{ .Values.dgraph.storageMountSize }}
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: dgraph-alpha
+spec:
+ serviceName: "dgraph-alpha"
+ replicas: {{ .Values.dgraph.HA.replicas }}
+ selector:
+ matchLabels:
+ app: dgraph-alpha
+ template:
+ metadata:
+ labels:
+ app: dgraph-alpha
+ spec:
+ serviceAccountName: dgraph-sa
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - dgraph-alpha
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - name: alpha
+ image: dgraph/dgraph:latest
+ imagePullPolicy: IfNotPresent
+ ports:
+ - containerPort: 7080
+ name: grpc-alpha-int
+ - containerPort: 8080
+ name: http-alpha
+ - containerPort: 9080
+ name: grpc-alpha
+ volumeMounts:
+ - name: datadir
+ mountPath: /dgraph
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ #- name: AWS_DEFAULT_REGION
+ # value: us-east-1
+ #- name: AWS_ACCESS_KEY_ID
+ # valueFrom:
+ # secretKeyRef:
+ # key: accesskey
+ # name: aws-s3-creds
+ #- name: AWS_SECRET_ACCESS_KEY
+ # valueFrom:
+ # secretKeyRef:
+ # key: secretkey
+ # name: aws-s3-creds
+ - name: ENABLE_IMPORT
+ value: "{{ .Values.dgraph.HA.migrateolddata }}"
+ command:
+ - bash
+ - "-c"
+ - |
+ set -ex
+ sleep 30
+ if [ "${ENABLE_IMPORT}" == "true" ]; then
+
+ export DGRAPH_LEADER=$(curl -s http://dgraph-zero-public.${POD_NAMESPACE}.svc.cluster.local:6080/state | jq -r '.zeros[] | select(.leader == true) | .addr')
+ curl -s http://dgraph-zero-public.${POD_NAMESPACE}.svc.cluster.local:6080/state | jq -r '.zeros[] | select(.leader == true) | .addr'
+ dgraph bulk -f {{ .Values.dgraph.HA.s3bucketurl }}/g01.rdf.gz -g {{ .Values.dgraph.HA.s3bucketurl }}/g01.gql_schema.gz --format=rdf --zero=${DGRAPH_LEADER}
+ cp -r out/0/p .
+ else
+ echo "Skipping dgraph data import due to ENABLE_IMPORT variable set to \"false\""
+ fi
+ dgraph alpha --my=$(hostname -f):7080 --zero dgraph-zero-0.dgraph-zero.${POD_NAMESPACE}.svc.cluster.local:5080,dgraph-zero-1.dgraph-zero.${POD_NAMESPACE}.svc.cluster.local:5080,dgraph-zero-2.dgraph-zero.${POD_NAMESPACE}.svc.cluster.local:5080 --security whitelist=0.0.0.0/0,::/0
+ {{- with .Values.dgraph.probes }}
+ {{- toYaml . | nindent 8 }}
+ {{- else }}
+ livenessProbe:
+ httpGet:
+ path: /health?live=1
+ port: 8080
+ initialDelaySeconds: 15
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ readinessProbe:
+ httpGet:
+ path: /health
+ port: 8080
+ initialDelaySeconds: 15
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ {{- end }}
+ {{- with .Values.dgraph.resources }}
+ resources:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ terminationGracePeriodSeconds: 600
+ volumes:
+ - name: datadir
+ persistentVolumeClaim:
+ claimName: datadir
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ updateStrategy:
+ type: RollingUpdate
+ volumeClaimTemplates:
+ - metadata:
+ name: datadir
+ spec:
+ {{- if .Values.storageClass }}
+ storageClassName: {{ .Values.storageClass }}
+ {{- end }}
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: {{ .Values.dgraph.storageMountSize }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: dgraph-ratel
+ labels:
+ app: dgraph-ratel
+spec:
+ selector:
+ matchLabels:
+ app: dgraph-ratel
+ template:
+ metadata:
+ labels:
+ app: dgraph-ratel
+ spec:
+ containers:
+ - name: ratel
+ image: dgraph/ratel:latest
+ ports:
+ - containerPort: 8000
+ {{- with .Values.ratel.resources }}
+ resources:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+{{- else }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
@@ -124,11 +416,11 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
-{{- if .Values.noPvc }}
+ {{- if .Values.noPvc }}
volumes:
- name: datadir
emptyDir: {}
-{{- else }}
+ {{- else }}
volumeClaimTemplates:
- metadata:
name: datadir
@@ -142,5 +434,6 @@ spec:
- "ReadWriteOnce"
resources:
requests:
- storage: 5Gi
+ storage: {{ .Values.dgraph.storageMountSize }}
+ {{- end }}
{{- end }}
diff --git a/charts/ssd/templates/dgraph/svc.yaml b/charts/ssd/templates/dgraph/svc.yaml
index fcc897f..4292bf4 100644
--- a/charts/ssd/templates/dgraph/svc.yaml
+++ b/charts/ssd/templates/dgraph/svc.yaml
@@ -1,3 +1,89 @@
+{{- if .Values.dgraph.HA.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: dgraph-zero-public
+ labels:
+ app: dgraph-zero
+ monitor: zero-dgraph-io
+spec:
+ type: ClusterIP
+ ports:
+ - port: 5080
+ targetPort: 5080
+ name: grpc-zero
+ - port: 6080
+ targetPort: 6080
+ name: http-zero
+ selector:
+ app: dgraph-zero
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: dgraph-alpha-public
+ labels:
+ app: dgraph-alpha
+ monitor: alpha-dgraph-io
+spec:
+ type: ClusterIP
+ ports:
+ - port: 8080
+ targetPort: 8080
+ name: http-alpha
+ - port: 9080
+ targetPort: 9080
+ name: grpc-alpha
+ selector:
+ app: dgraph-alpha
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: dgraph-ratel-public
+ labels:
+ app: dgraph-ratel
+spec:
+ type: ClusterIP
+ ports:
+ - port: 8000
+ targetPort: 8000
+ name: http-ratel
+ selector:
+ app: dgraph-ratel
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: dgraph-zero
+ labels:
+ app: dgraph-zero
+spec:
+ ports:
+ - port: 5080
+ targetPort: 5080
+ name: grpc-zero
+ clusterIP: None
+ publishNotReadyAddresses: true
+ selector:
+ app: dgraph-zero
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: dgraph-alpha
+ labels:
+ app: dgraph-alpha
+spec:
+ ports:
+ - port: 7080
+ targetPort: 7080
+ name: grpc-alpha-int
+ clusterIP: None
+ publishNotReadyAddresses: true
+ selector:
+ app: dgraph-alpha
+{{- else }}
apiVersion: v1
kind: Service
metadata:
@@ -30,3 +116,4 @@ spec:
selector:
app: ssd
component: dgraph
+{{- end }}
diff --git a/charts/ssd/templates/otel/otel-gateway-config.yaml b/charts/ssd/templates/otel/otel-gateway-config.yaml
new file mode 100644
index 0000000..642929c
--- /dev/null
+++ b/charts/ssd/templates/otel/otel-gateway-config.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: otel-gateway-config
+ labels:
+ app: ssd
+{{ include "ssd.standard-labels" . | indent 4 }}
+ component: otel-gateway
+data:
+{{ (tpl (.Files.Glob "config/otel/otel-gateway-config.yaml").AsConfig . ) | indent 2 }}
diff --git a/charts/ssd/templates/otel/otel-gateway-deploy.yaml b/charts/ssd/templates/otel/otel-gateway-deploy.yaml
new file mode 100644
index 0000000..2e853a7
--- /dev/null
+++ b/charts/ssd/templates/otel/otel-gateway-deploy.yaml
@@ -0,0 +1,88 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: ssd
+ component: otel-gateway
+{{ include "ssd.standard-labels" . | indent 4 }}
+ name: otel-gateway
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: ssd
+ component: otel-gateway
+ strategy:
+ rollingUpdate:
+ maxSurge: 25%
+ maxUnavailable: 25%
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: ssd
+ component: otel-gateway
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels | indent 8}}
+{{- end }}
+ annotations:
+{{- if .Values.otel.annotations -}}
+ {{ toYaml .Values.otel.annotations | nindent 8 }}
+{{- end }}
+ spec:
+ serviceAccountName: otel-collector-{{ .Release.Namespace }}
+ containers:
+ - args:
+ - --config=/etc/otel/otel-gateway-config.yaml
+ image: {{ template "otel.image" . }}
+ imagePullPolicy: Always
+ name: otel-gateway
+ ports:
+ - containerPort: 4317
+ name: otlp-grpc
+ protocol: TCP
+ - containerPort: 4318
+ name: otlp-http
+ protocol: TCP
+ - containerPort: 9464
+ name: prometheus
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /etc/otel
+ name: config-volume
+ {{- with .Values.otel.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ {{- if .Values.otel.securityContext }}
+ securityContext:
+ {{ toYaml .Values.otel.securityContext | nindent 12 }}
+ {{- else }}
+ securityContext: {{ default "{}" }}
+ {{- end }}
+ {{- if .Values.imagePullSecret }}
+ imagePullSecrets:
+ - name: {{ .Values.imagePullSecret }}
+ {{- end }}
+ restartPolicy: Always
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - configMap:
+ defaultMode: 420
+ name: otel-gateway-config
+ name: config-volume
diff --git a/charts/ssd/templates/otel/otel-gateway-svc.yaml b/charts/ssd/templates/otel/otel-gateway-svc.yaml
new file mode 100644
index 0000000..3e4946d
--- /dev/null
+++ b/charts/ssd/templates/otel/otel-gateway-svc.yaml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: otel-gateway
+ labels:
+ app: ssd
+ component: otel-gateway
+{{ include "ssd.standard-labels" . | indent 4 }}
+{{- if .Values.otel.serviceAnnotations }}
+ annotations:
+{{ toYaml .Values.otel.serviceAnnotations | indent 4 }}
+{{- end }}
+spec:
+ ports:
+ - name: otlp-grpc
+ port: 4317
+ protocol: TCP
+ targetPort: 4317
+ - name: otlp-http
+ port: 4318
+ protocol: TCP
+ targetPort: 4318
+ - name: prometheus
+ port: 9464
+ protocol: TCP
+ targetPort: 9464
+ selector:
+ app: ssd
+ component: otel-gateway
diff --git a/charts/ssd/templates/otel/otel-sidecar-config.yaml b/charts/ssd/templates/otel/otel-sidecar-config.yaml
new file mode 100644
index 0000000..2227f33
--- /dev/null
+++ b/charts/ssd/templates/otel/otel-sidecar-config.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: otel-sidecar-config
+ labels:
+ app: ssd
+{{ include "ssd.standard-labels" . | indent 4 }}
+ component: otel-sidecar
+data:
+{{ (tpl (.Files.Glob "config/otel/otel-sidecar-config.yaml").AsConfig . ) | indent 2 }}
diff --git a/charts/ssd/templates/otel/sa.yaml b/charts/ssd/templates/otel/sa.yaml
new file mode 100644
index 0000000..c3c4898
--- /dev/null
+++ b/charts/ssd/templates/otel/sa.yaml
@@ -0,0 +1,36 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: otel-collector-{{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: otel-collector-role-{{ .Release.Namespace }}
+rules:
+ - apiGroups: [""]
+ resources:
+ - nodes/stats
+ - nodes/proxy
+ - nodes/metrics
+ - nodes
+ - pods
+ - namespaces
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["apps"]
+ resources:
+ - replicasets
+ verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: otel-collector-rolebinding-{{ .Release.Namespace }}
+subjects:
+ - kind: ServiceAccount
+ name: otel-collector-{{ .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: otel-collector-role-{{ .Release.Namespace }}
+ apiGroup: rbac.authorization.k8s.io
diff --git a/charts/ssd/templates/snyk-monitor/snyk-monitor-cm.yaml b/charts/ssd/templates/snyk-monitor/snyk-monitor-cm.yaml
new file mode 100644
index 0000000..7aa0045
--- /dev/null
+++ b/charts/ssd/templates/snyk-monitor/snyk-monitor-cm.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: snyk-monitor-config
+ labels:
+ app: ssd
+{{ include "ssd.standard-labels" . | indent 4 }}
+ component: snyk-monitor
+data:
+{{ (tpl (.Files.Glob "config/snyk-monitor/config.yaml").AsConfig . ) | indent 2 }}
diff --git a/charts/ssd/templates/snyk-monitor/snyk-monitor-deploy.yaml b/charts/ssd/templates/snyk-monitor/snyk-monitor-deploy.yaml
new file mode 100644
index 0000000..32b8c30
--- /dev/null
+++ b/charts/ssd/templates/snyk-monitor/snyk-monitor-deploy.yaml
@@ -0,0 +1,101 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: snyk-monitor
+ labels:
+ app: ssd
+ component: snyk-monitor
+{{ include "ssd.standard-labels" . | indent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: ssd
+ component: snyk-monitor
+ template:
+ metadata:
+ labels:
+ app: ssd
+ component: snyk-monitor
+{{ include "ssd.standard-labels" . | indent 8 }}
+ annotations:
+{{- if .Values.snykmonitor.annotations -}}
+ {{ toYaml .Values.snykmonitor.annotations | nindent 8 }}
+{{- end }}
+ spec:
+ containers:
+{{ include "otel.sidecar.container" . | indent 8 }}
+ - name: snyk-monitor
+ image: {{ template "snykmonitor.image" . }} # Change this to your actual image in a registry
+ volumeMounts:
+ - name: ssd-public-keys
+ mountPath: /etc/ssd-public-keys
+ - name: ssdsecret
+ readOnly: true
+ mountPath: /etc/secrets
+ - name: cli-config
+ mountPath: /etc/config
+ - name: snyk-monitor-secret
+ mountPath: /etc/secrets/dgraph-secret
+ readOnly: true
+ {{- with .Values.snykmonitor.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+
+ {{- if .Values.snykmonitor.securityContext }}
+ securityContext:
+ {{ toYaml .Values.snykmonitor.securityContext | nindent 12 }}
+ {{- else }}
+ securityContext: {{ default "{}" }}
+ {{- end }}
+
+ {{- if .Values.imagePullSecret }}
+ imagePullSecrets:
+ - name: {{ .Values.imagePullSecret }}
+ {{- end }}
+ volumes:
+ - configMap:
+ defaultMode: 420
+ name: otel-sidecar-config
+ name: otel-sidecar-volume
+ - name: ssd-public-keys
+ configMap:
+ name: ssd-public-keys
+ defaultMode: 420
+ - name: cli-config
+ configMap:
+ name: snyk-monitor-config
+ items:
+ - key: config.yaml
+ path: config.yaml
+ defaultMode: 420
+ - name: ssdsecret
+ secret:
+ secretName: ssdsecret
+ items:
+ - key: AESEncryptionKey
+ path: ssdsecret/AESEncryptionKey
+ defaultMode: 420
+ - name: snyk-monitor-secret
+ secret:
+ defaultMode: 420
+ secretName: snyk-monitor-secret
+ restartPolicy: Always
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.topologySpreadConstraints }}
+ topologySpreadConstraints:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+
diff --git a/charts/ssd/templates/ssd-opa/ssd-opa-deployment.yaml b/charts/ssd/templates/ssd-opa/ssd-opa-deployment.yaml
index af1e656..036f289 100644
--- a/charts/ssd/templates/ssd-opa/ssd-opa-deployment.yaml
+++ b/charts/ssd/templates/ssd-opa/ssd-opa-deployment.yaml
@@ -98,6 +98,7 @@ spec:
imagePullSecrets:
- name: {{ .Values.imagePullSecret }}
{{- end }}
+{{ include "otel.sidecar.container" . | indent 8 }}
volumes:
- configMap:
defaultMode: 420
@@ -138,6 +139,10 @@ spec:
path: ca.crt
optional: true
name: customer-custon-ca
+ - name: otel-sidecar-volume
+ configMap:
+ name: otel-sidecar-config
+ defaultMode: 420
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
diff --git a/charts/ssd/templates/ssd-ui/ssd-ui-configmap.yaml b/charts/ssd/templates/ssd-ui/ssd-ui-configmap.yaml
index f5a0e2e..b0a99ed 100644
--- a/charts/ssd/templates/ssd-ui/ssd-ui-configmap.yaml
+++ b/charts/ssd/templates/ssd-ui/ssd-ui-configmap.yaml
@@ -3,6 +3,7 @@ data:
{{ (tpl (.Files.Glob "config/ssd-ui/app-config.json").AsConfig . ) | indent 2 }}
{{ (tpl (.Files.Glob "config/ssd-ui/help-text.json").AsConfig . ) | indent 2 }}
{{ (tpl (.Files.Glob "config/ssd-ui/integrators-config.json").AsConfig . ) | indent 2 }}
+{{ (tpl (.Files.Glob "config/ssd-ui/insights-config.json").AsConfig . ) | indent 2 }}
kind: ConfigMap
metadata:
name: ssd-ui-config
diff --git a/charts/ssd/templates/ssd-ui/ssd-ui-deployment.yaml b/charts/ssd/templates/ssd-ui/ssd-ui-deployment.yaml
index 89f9422..6adf9b2 100644
--- a/charts/ssd/templates/ssd-ui/ssd-ui-deployment.yaml
+++ b/charts/ssd/templates/ssd-ui/ssd-ui-deployment.yaml
@@ -50,6 +50,9 @@ spec:
- name: config-dir
mountPath: /var/www/html/ui/assets/config/integrators-config.json
subPath: integrators-config.json
+ - name: config-dir
+ mountPath: /var/www/html/ui/assets/config/insights-config.json
+ subPath: insights-config.json
- mountPath: /etc/nginx/nginx.conf
name: nginx-config
subPath: nginx.conf
diff --git a/charts/ssd/templates/toolchain/toolchain-deployment.yaml b/charts/ssd/templates/toolchain/toolchain-deployment.yaml
index a01a24a..ed684e3 100644
--- a/charts/ssd/templates/toolchain/toolchain-deployment.yaml
+++ b/charts/ssd/templates/toolchain/toolchain-deployment.yaml
@@ -25,6 +25,9 @@ spec:
{{ toYaml .Values.toolchain.annotations | nindent 8 }}
{{- end }}
spec:
+ {{- if not .Values.global.minio.enabled }}
+ serviceAccountName: toolchain-sa
+ {{- end }}
initContainers:
- name: temporalfrontend-check
image: {{ .Values.global.customImages.registry }}/pgchecker:v1
@@ -41,10 +44,16 @@ spec:
- name: tool-chain
image: {{ template "toolchain.image" . }}
env:
+ {{- if .Values.dgraph.HA.enabled }}
+ - name: GRAPHQL_URL
+ value: http://dgraph-alpha-public:8080/graphql
+ {{- else }}
- name: GRAPHQL_URL
value: http://dgraph-public:8080/graphql
+ {{- end }}
- name: TEMPORAL_HOSTPORT
value: {{ .Release.Name }}-temporal-frontend:7233
+ {{- if .Values.global.minio.enabled }}
- name: S3_ENDPOINT_URL
value: http://{{ .Release.Name }}-minio:9000/
- name: AWS_ACCESS_KEY_ID
@@ -57,6 +66,12 @@ spec:
secretKeyRef:
key: secretkey
name: {{ .Release.Name }}-minio
+ {{- else }}
+ - name: S3_ENDPOINT_URL
+ value: {{ .Values.s3bucketurl }}
+ - name: S3_Region
+ value: {{ .Values.s3region }}
+ {{- end }}
- name: Cypher_Key
valueFrom:
secretKeyRef:
@@ -128,6 +143,7 @@ spec:
imagePullSecrets:
- name: {{ .Values.imagePullSecret }}
{{- end }}
+{{ include "otel.sidecar.container" . | indent 8 }}
volumes:
- name: scanresult
emptyDir: {}
@@ -160,6 +176,10 @@ spec:
configMap:
defaultMode: 420
name: ssd-public-keys
+ - name: otel-sidecar-volume
+ configMap:
+ name: otel-sidecar-config
+ defaultMode: 420
restartPolicy: Always
{{- with .Values.nodeSelector }}
nodeSelector:
diff --git a/charts/ssd/templates/toolchain/toolchain-sa.yaml b/charts/ssd/templates/toolchain/toolchain-sa.yaml
new file mode 100644
index 0000000..b0a43da
--- /dev/null
+++ b/charts/ssd/templates/toolchain/toolchain-sa.yaml
@@ -0,0 +1,10 @@
+{{- if not .Values.global.minio.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: toolchain-sa
+{{- if .Values.toolchain.saAnnotations }}
+ annotations:
+{{ toYaml .Values.toolchain.saAnnotations | indent 4 }}
+{{- end }}
+{{- end }}
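With `global.minio.enabled` set to false, tool-chain reads artifacts from an external S3 bucket via this `toolchain-sa` service account, so the IRSA annotation plus the `s3region`/`s3bucketurl` values have to be supplied. A sketch, where every value is a placeholder:

```yaml
# external S3 sketch used when global.minio.enabled is false; all values are placeholders
global:
  minio:
    enabled: false
s3region: us-east-1
s3bucketurl: https://my-ssd-artifacts.s3.us-east-1.amazonaws.com
toolchain:
  saAnnotations:
    eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/toolchain-s3-access   # hypothetical IRSA role
```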
diff --git a/charts/ssd/values.yaml b/charts/ssd/values.yaml
index c175d83..9b79478 100644
--- a/charts/ssd/values.yaml
+++ b/charts/ssd/values.yaml
@@ -34,6 +34,15 @@ organisationname: default
## Admin Groups for Rbac #######################
admingroups: admin
+
+#############################################################################
+# Update the Prometheus Ingress URL ########################################
+prometheusUrl: prometheus.ingress.url.com
+###############################################################################
+### Update the below parameters only if global.minio.enabled is false ########
+### Update the S3 Bucket REGION in AWS #######################################
+s3region: awsregion
+s3bucketurl: https://s3bucketname.s3.awsregion.amazonaws.com
####################################################
# Global variables can be accessed across all the charts including sub-charts
global:
@@ -110,14 +119,32 @@ global:
size: 10Gi
# redis:
# password:
+#######################################################################################################################
+#### Update the below parameters only if you want to migrate existing Dgraph data to the HA setup ####################
+#######################################################################################################################
dgraph:
+ HA:
+ enabled: false
+ replicas: 3
+ migrateolddata: true
+ ### Update the S3 Bucket Name where the existing Dgraph Data is stored ###########################################
+ s3bucketurl: s3:///BUCKETNAME/DGRAPHFOLDER
+ ### Update the annotations so the IAM role uses the Kubernetes service account to access S3 buckets ##################
+ saAnnotations: {}
+ # eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/
+
image:
repository: dgraph
tag: v23.1.0
+ storageMountSize: 5Gi
+ probes: {}
+ resources: {}
+
ratel:
image:
repository: ratel
tag: v21.12.0
+ resources: {}
curl:
image:
repository: curl
@@ -216,7 +243,7 @@ toolchain:
## Image specific details
image:
repository: tool-chain
- tag: "2025-01-00"
+ tag: "2024-12-03"
pullPolicy: IfNotPresent
replicaCount: 1
@@ -225,6 +252,10 @@ toolchain:
serviceAnnotations: {}
+ ### Update the annotations so the IAM role uses the Kubernetes service account to access S3 buckets ##################
+ saAnnotations: {}
+ # eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/
+
resources: {}
probes: {}
@@ -265,7 +296,7 @@ ssdopa:
## Image specific details
image:
repository: ssd-opa
- tag: "2025-01-00"
+ tag: "2024-12-02"
pullPolicy: IfNotPresent
replicaCount: 1
@@ -286,7 +317,7 @@ supplychainapi:
## Image specific details
image:
repository: supplychain-api
- tag: "2025-01-00"
+ tag: "2024-12-04"
pullPolicy: IfNotPresent
replicaCount: 1
@@ -349,7 +380,7 @@ ui:
## Image specific details
image:
repository: ssd-ui
- tag: "2025-01-00"
+ tag: "2024-12-03"
pullPolicy: IfNotPresent
annotations: {}
@@ -403,6 +434,47 @@ zap:
securityContext: {}
##############################################################################
+otel:
+ image:
+ repository: otel/opentelemetry-collector-contrib
+ tag: "latest"
+ pullPolicy: IfNotPresent
+
+ replicaCount: 1
+ annotations: {}
+
+ serviceAnnotations: {}
+
+ resources: {}
+
+ probes: {}
+
+ securityContext: {}
+
+ interval: 10
+
+ LongInterval: 21600
+
+##############################################################################
+snykmonitor:
+ image:
+ repository: snyk-monitor
+ tag: "2025-02-00"
+ pullPolicy: IfNotPresent
+
+ replicaCount: 1
+ annotations: {}
+
+ resources: {}
+
+ probes: {}
+
+ securityContext: {}
+
+ interval: 120m # Format: 0h0m0s (default: 24h)
+
+ loglevel: info
+##############################################################################
kubedetector:
image:
repository: kubernetes-detector