From 46991fad31c268fbf2f2d3732862dc12a52dbdb4 Mon Sep 17 00:00:00 2001 From: grig777 Date: Sun, 2 Mar 2025 15:33:34 -0500 Subject: [PATCH 1/4] Add local dependancies and CI check --- .github/workflows/deploy-helm-chart.yml | 30 + stable/jfrog-platform/Chart.lock | 12 +- stable/jfrog-platform/Chart.yaml | 8 +- .../rabbitmq/.helmignore | 25 + .../rabbitmq/CHANGELOG.md | 5 + .../rabbitmq/Chart.lock | 6 + .../rabbitmq/Chart.yaml | 31 + .../rabbitmq/README.md | 872 ++++++++ .../rabbitmq/templates/NOTES.txt | 156 ++ .../rabbitmq/templates/_helpers.tpl | 319 +++ .../rabbitmq/templates/config-secret.yaml | 25 + .../rabbitmq/templates/extra-list.yaml | 9 + .../templates/ingress-tls-secrets.yaml | 42 + .../rabbitmq/templates/ingress.yaml | 58 + .../rabbitmq/templates/init-configmap.yaml | 17 + .../rabbitmq/templates/networkpolicy.yaml | 91 + .../rabbitmq/templates/pdb.yaml | 25 + .../rabbitmq/templates/prometheusrule.yaml | 24 + .../rabbitmq/templates/role.yaml | 25 + .../rabbitmq/templates/rolebinding.yaml | 22 + .../rabbitmq/templates/secrets.yaml | 61 + .../rabbitmq/templates/serviceaccount.yaml | 20 + .../rabbitmq/templates/servicemonitor.yaml | 56 + .../rabbitmq/templates/statefulset.yaml | 515 +++++ .../rabbitmq/templates/svc-headless.yaml | 44 + .../rabbitmq/templates/svc.yaml | 110 + .../rabbitmq/templates/tls-secrets.yaml | 35 + .../rabbitmq/templates/validation.yaml | 7 + .../rabbitmq/values.schema.json | 100 + .../rabbitmq/values.yaml | 1527 ++++++++++++++ .../local_dependancy_charts/xray/.helmignore | 22 + .../local_dependancy_charts/xray/CHANGELOG.md | 934 +++++++++ .../local_dependancy_charts/xray/Chart.lock | 12 + .../local_dependancy_charts/xray/Chart.yaml | 31 + .../local_dependancy_charts/xray/LICENSE | 201 ++ .../local_dependancy_charts/xray/README.md | 66 + .../xray/ci/default-values.yaml | 90 + .../xray/ci/global-section-values.yaml | 155 ++ .../ci/test-rabbitmq-haQuorum-values.yaml | 65 + .../ci/test-rabbitmq-replicaCount-values.yaml | 58 + 
.../xray/ci/test-values.yaml | 175 ++ .../xray/files/system.yaml | 86 + .../xray/logo/xray-logo.png | Bin 0 -> 11735 bytes .../xray/rabbitmq/ha-quorum.yaml | 10 + .../xray/rabbitmq/migration-to-ha-quorum.yaml | 12 + .../xray/sizing/xray-2xlarge.yaml | 148 ++ .../xray/sizing/xray-large.yaml | 148 ++ .../xray/sizing/xray-medium.yaml | 148 ++ .../xray/sizing/xray-small.yaml | 148 ++ .../xray/sizing/xray-xlarge.yaml | 148 ++ .../xray/sizing/xray-xsmall.yaml | 148 ++ .../xray/templates/NOTES.txt | 28 + .../xray/templates/_helpers.tpl | 755 +++++++ .../xray/templates/_system-yaml-render.tpl | 5 + .../xray/templates/additional-resources.yaml | 3 + .../templates/catalog-db-create-hook.yaml | 69 + .../xray/templates/filebeat-configmap.yaml | 15 + .../xray/templates/keys-warnings.yaml | 15 + .../xray/templates/logger-configmap.yaml | 63 + .../xray/templates/migration-hook.yaml | 197 ++ .../xray/templates/xray-configmaps.yaml | 13 + .../xray/templates/xray-custom-secrets.yaml | 19 + .../xray/templates/xray-database-secrets.yaml | 27 + .../xray/templates/xray-hpa-ipa.yaml | 31 + .../xray/templates/xray-hpa-server.yaml | 31 + .../xray/templates/xray-hpa.yaml | 37 + .../xray/templates/xray-ipa-deployment.yaml | 1411 +++++++++++++ .../xray/templates/xray-ipa-svc.yaml | 60 + .../xray/templates/xray-keda-hpa-ipa.yaml | 47 + .../xray/templates/xray-keda-hpa-server.yaml | 47 + .../xray/templates/xray-keda-hpa.yaml | 47 + .../xray/templates/xray-keda-secret.yaml | 13 + .../xray-keda-trigger-authentication.yaml | 16 + .../xray/templates/xray-networkpolicy.yaml | 33 + .../xray/templates/xray-pdb.yaml | 23 + .../xray/templates/xray-priority-class.yaml | 9 + .../xray/templates/xray-resourcequota.yaml | 14 + .../xray/templates/xray-role.yaml | 13 + .../xray/templates/xray-rolebinding.yaml | 18 + .../xray/templates/xray-secrets.yaml | 37 + .../templates/xray-server-deployment.yaml | 727 +++++++ .../xray/templates/xray-serviceaccount.yaml | 16 + .../xray/templates/xray-statefulset.yaml | 
1539 ++++++++++++++ .../xray/templates/xray-svc.yaml | 42 + .../xray/templates/xray-system-yaml.yaml | 15 + .../xray/templates/xray-unified-secret.yaml | 70 + .../local_dependancy_charts/xray/values.yaml | 1768 +++++++++++++++++ 87 files changed, 14345 insertions(+), 10 deletions(-) create mode 100644 .github/workflows/deploy-helm-chart.yml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/.helmignore create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/CHANGELOG.md create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/Chart.lock create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/Chart.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/README.md create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/NOTES.txt create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/_helpers.tpl create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/config-secret.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/extra-list.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/ingress-tls-secrets.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/ingress.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/init-configmap.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/networkpolicy.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/pdb.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/prometheusrule.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/role.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/rolebinding.yaml create mode 100644 
stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/secrets.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/serviceaccount.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/servicemonitor.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/statefulset.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/svc-headless.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/svc.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/tls-secrets.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/validation.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/values.schema.json create mode 100644 stable/jfrog-platform/local_dependancy_charts/rabbitmq/values.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/.helmignore create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/CHANGELOG.md create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/Chart.lock create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/Chart.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/LICENSE create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/README.md create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/ci/default-values.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/ci/global-section-values.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/ci/test-rabbitmq-haQuorum-values.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/ci/test-rabbitmq-replicaCount-values.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/ci/test-values.yaml create mode 100644 
stable/jfrog-platform/local_dependancy_charts/xray/files/system.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/logo/xray-logo.png create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/rabbitmq/ha-quorum.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/rabbitmq/migration-to-ha-quorum.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/sizing/xray-2xlarge.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/sizing/xray-large.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/sizing/xray-medium.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/sizing/xray-small.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/sizing/xray-xlarge.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/sizing/xray-xsmall.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/NOTES.txt create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/_helpers.tpl create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/_system-yaml-render.tpl create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/additional-resources.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/catalog-db-create-hook.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/filebeat-configmap.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/keys-warnings.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/logger-configmap.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/migration-hook.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-configmaps.yaml create mode 100644 
stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-custom-secrets.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-database-secrets.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa-ipa.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa-server.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-ipa-deployment.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-ipa-svc.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa-ipa.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa-server.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-secret.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-trigger-authentication.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-networkpolicy.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-pdb.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-priority-class.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-resourcequota.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-role.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-rolebinding.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-secrets.yaml create mode 100644 
stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-server-deployment.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-serviceaccount.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-statefulset.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-svc.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-system-yaml.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-unified-secret.yaml create mode 100644 stable/jfrog-platform/local_dependancy_charts/xray/values.yaml diff --git a/.github/workflows/deploy-helm-chart.yml b/.github/workflows/deploy-helm-chart.yml new file mode 100644 index 000000000..722a421b7 --- /dev/null +++ b/.github/workflows/deploy-helm-chart.yml @@ -0,0 +1,30 @@ +name: Deploy Helm Chart + +on: + workflow_dispatch: + pull_request: + branches: ["release"] + paths: + - stable/jfrog-platform/** + push: + branches: ["release"] + paths: + - stable/jfrog-platform/** + +jobs: + check: + runs-on: forge-amd64-dagger + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Checkout elevation-data + uses: actions/checkout@v4 + with: + repository: fastly/elevation-data + + - name: Conftest + uses: fastly/forge/actions/dagger@actions-v1 + with: + verb: call + module: .forge/dagger-modules/standard/registry/ + args: --vault-token="env:VAULT_TOKEN" helm elevation-conftest --chart ./jfrog-helm-charts/stable/jfrog-platform/ --additional-values ./elevation-data/workloads/dev-usc1/jfrog-platform/jfrog-platform.yaml diff --git a/stable/jfrog-platform/Chart.lock b/stable/jfrog-platform/Chart.lock index d2848b4b5..4beabfb12 100644 --- a/stable/jfrog-platform/Chart.lock +++ b/stable/jfrog-platform/Chart.lock @@ -3,14 +3,14 @@ dependencies: repository: https://charts.jfrog.io/ version: 15.5.20 - name: rabbitmq - repository: https://charts.jfrog.io/ - 
version: 11.9.3 + repository: file://local_dependancy_charts/rabbitmq + version: 14.6.6 - name: artifactory repository: https://charts.jfrog.io/ version: 107.104.10 - name: xray - repository: https://charts.jfrog.io/ - version: 103.107.21 + repository: file://local_dependancy_charts/xray + version: 103.111.15 - name: catalog repository: https://charts.jfrog.io/ version: 101.13.0 @@ -20,5 +20,5 @@ dependencies: - name: worker repository: https://charts.jfrog.io/ version: 101.118.0 -digest: sha256:d1a0a0f3cdf278643b2059fd78afda7b1e722602dd675186f30810f8eccdca7f -generated: "2025-02-26T18:21:48.98957-05:00" +digest: sha256:4289db49e8f4654cb464dbd65d218f44748b9f6878f8dfac385c10639bcbc28f +generated: "2025-03-02T14:43:40.121376-05:00" diff --git a/stable/jfrog-platform/Chart.yaml b/stable/jfrog-platform/Chart.yaml index 9e2a3c34f..51c93ad52 100644 --- a/stable/jfrog-platform/Chart.yaml +++ b/stable/jfrog-platform/Chart.yaml @@ -7,16 +7,16 @@ dependencies: version: 15.5.20 - condition: rabbitmq.enabled name: rabbitmq - repository: https://charts.jfrog.io/ - version: 11.9.3 + repository: file://local_dependancy_charts/rabbitmq + version: 14.6.6 - condition: artifactory.enabled name: artifactory repository: https://charts.jfrog.io/ version: 107.104.10 - condition: xray.enabled name: xray - repository: https://charts.jfrog.io/ - version: 103.107.21 + repository: file://local_dependancy_charts/xray + version: 103.111.15 - condition: catalog.enabled name: catalog repository: https://charts.jfrog.io/ diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/.helmignore b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/.helmignore new file mode 100644 index 000000000..207983f36 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/.helmignore @@ -0,0 +1,25 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# img folder +img/ +# Changelog +CHANGELOG.md diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/CHANGELOG.md b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/CHANGELOG.md new file mode 100644 index 000000000..05cf36803 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/CHANGELOG.md @@ -0,0 +1,5 @@ +* Removed all the explicit namespace definition from the resources +* In template/statefulset.yaml added size limit for empty disk definition +volumes: + - name: empty-dir + sizeLimit: 1Gi \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/Chart.lock b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/Chart.lock new file mode 100644 index 000000000..1349f391c --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + version: 2.20.5 +digest: sha256:5b98791747a148b9d4956b81bb8635f49a0ae831869d700d52e514b8fd1a2445 +generated: "2024-07-16T12:17:21.995344+02:00" diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/Chart.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/Chart.yaml new file mode 100644 index 000000000..06dcc0cc8 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/Chart.yaml @@ -0,0 +1,31 @@ +annotations: + category: Infrastructure + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:12-debian-12-r26 + - name: rabbitmq + image: docker.io/bitnami/rabbitmq:3.13.6-debian-12-r1 + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 3.13.6 +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + tags: + - bitnami-common + version: 2.x.x +description: RabbitMQ is an 
open source general-purpose message broker that is designed + for consistent, highly-available messaging scenarios (both synchronous and asynchronous). +home: https://bitnami.com +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- name: Broadcom, Inc. All Rights Reserved. + url: https://github.com/bitnami/charts +name: rabbitmq +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq +version: 14.6.6 diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/README.md b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/README.md new file mode 100644 index 000000000..12d18928c --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/README.md @@ -0,0 +1,872 @@ + + +# Bitnami package for RabbitMQ + +RabbitMQ is an open source general-purpose message broker that is designed for consistent, highly-available messaging scenarios (both synchronous and asynchronous). + +[Overview of RabbitMQ](https://www.rabbitmq.com) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/rabbitmq +``` + +Looking to use RabbitMQ in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. 
+ +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.8.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Configuration and installation details + +### Resource requests and limits + +Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case. + +To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcePreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). 
+ +### [Rolling vs Immutable tags](https://docs.vmware.com/en/VMware-Tanzu-Application-Catalog/services/tutorials/GUID-understand-rolling-tags-containers-index.html) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Set pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Scale horizontally + +To horizontally scale this chart once it has been deployed, two options are available: + +- Use the `kubectl scale` command. +- Upgrade the chart modifying the `replicaCount` parameter. + +```text + replicaCount=3 + auth.password="$RABBITMQ_PASSWORD" + auth.erlangCookie="$RABBITMQ_ERLANG_COOKIE" +``` + +> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart. Otherwise, new pods won't be able to join the cluster. + +When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. These nodes must be manually removed via the `rabbitmqctl forget_cluster_node` command. 
+ +For instance, if RabbitMQ was initially installed with three replicas and then scaled down to two replicas, run the commands below (assuming that the release name is `rabbitmq` and the clustering type is `hostname`): + +```console + kubectl exec rabbitmq-0 --container rabbitmq -- rabbitmqctl forget_cluster_node rabbit@rabbitmq-2.rabbitmq-headless.default.svc.cluster.local + kubectl delete pvc data-rabbitmq-2 +``` + +> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart. + +### Enable TLS support + +To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). + +Once the certificates are generated, you have two alternatives: + +- Create a secret with the certificates and associate the secret when deploying the chart +- Include the certificates in the *values.yaml* file when deploying the chart + +Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate. + +Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed. + +This chart also facilitates the creation of TLS secrets for use with the Ingress controller (although this is not mandatory). There are several common use cases: + +- Generate certificate secrets based on chart parameters. +- Enable externally generated certificates. +- Manage application certificates via an external service (like [cert-manager](https://github.com/jetstack/cert-manager/)). +- Create self-signed certificates within the chart (if supported). + +In the first two cases, a certificate and a key are needed. Files are expected in `.pem` format. 
+ +Here is an example of a certificate file: + +> NOTE: There may be more than one certificate if there is a certificate chain. + +```text +-----BEGIN CERTIFICATE----- +MIID6TCCAtGgAwIBAgIJAIaCwivkeB5EMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +... +jScrvkiBO65F46KioCL9h5tDvomdU1aqpI/CBzhvZn1c0ZTf87tGQR8NK7v7 +-----END CERTIFICATE----- +``` + +Here is an example of a certificate key: + +```text +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAvLYcyu8f3skuRyUgeeNpeDvYBCDcgq+LsWap6zbX5f8oLqp4 +... +wrj2wDbCDCFmfqnSJ+dKI3vFLlEz44sAV8jX/kd4Y6ZTQhlLbYc= +-----END RSA PRIVATE KEY----- +``` + +- If using Helm to manage the certificates based on the parameters, copy these values into the `certificate` and `key` values for a given `*.ingress.secrets` entry. +- If managing TLS secrets separately, it is necessary to create a TLS secret with name `INGRESS_HOSTNAME-tls` (where INGRESS_HOSTNAME is a placeholder to be replaced with the hostname you set using the `*.ingress.hostname` parameter). +- If your cluster has a [cert-manager](https://github.com/jetstack/cert-manager) add-on to automate the management and issuance of TLS certificates, add to `*.ingress.annotations` the [corresponding ones](https://cert-manager.io/docs/usage/ingress/#supported-annotations) for cert-manager. +- If using self-signed certificates created by Helm, set both `*.ingress.tls` and `*.ingress.selfSigned` to `true`. + +### Load custom definitions + +It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](https://www.rabbitmq.com/management.html#load-definitions). Follow the steps below: + +Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value. 
+ +Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enable` to `true`. Any load definitions specified will be available within in the container at `/app`. + +> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters). + +If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values. Here is an example: + +```yaml +auth: + password: CHANGEME +extraSecrets: + load-definition: + load_definition.json: | + { + "users": [ + { + "name": "{{ .Values.auth.username }}", + "password": "{{ .Values.auth.password }}", + "tags": "administrator" + } + ], + "vhosts": [ + { + "name": "/" + } + ] + } +loadDefinition: + enabled: true + existingSecret: load-definition +extraConfiguration: | + load_definitions = /app/load_definition.json +``` + +### Configure LDAP support + +LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. For example: + +```text +ldap.enabled="true" +ldap.server="my-ldap-server" +ldap.port="389" +ldap.user_dn_pattern="cn=${username},dc=example,dc=org" +``` + +If `ldap.tls.enabled` is set to true, consider using `ldap.port=636` and checking the settings in the `advancedConfiguration` chart parameters. + +### Configure memory high watermark + +It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. 
To do so, you have two alternatives: + +- Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below: + +```text +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="absolute" +memoryHighWatermark.value="512Mi" +``` + +- Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. An example configuration is shown below: + +```text +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="relative" +memoryHighWatermark.value="0.4" +resources.limits.memory="2Gi" +``` + +### Add extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `.extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Configure the default user/vhost + +If you want to create default user/vhost and set the default permission. you can use `extraConfiguration`: + +```yaml +auth: + username: default-user +extraConfiguration: |- + default_vhost = default-vhost + default_permissions.configure = .* + default_permissions.read = .* + default_permissions.write = .* +``` + +### Use plugins + +The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s. + +To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ. 
+ +```text +communityPlugins="http://URL-TO-PLUGIN/" +extraPlugins="my-custom-plugin" +``` + +### Advanced logging + +In case you want to configure RabbitMQ logging set `logs` value to false and set the log config in extraConfiguration following the [official documentation](https://www.rabbitmq.com/logging.html#log-file-location). + +An example: + +```yaml +logs: false # custom logging +extraConfiguration: | + log.default.level = warning + log.file = false + log.console = true + log.console.level = warning + log.console.formatter = json +``` + +### How to Avoid Deadlocked Deployments After a Cluster-Wide Restart + +RabbitMQ nodes assume their peers come back online within five minutes (by default). When the `OrderedReady` pod management policy is used with a readiness probe that implicitly requires a fully booted node, the deployment can deadlock: + +- Kubernetes will expect the first node to pass a readiness probe +- The readiness probe may require a fully booted node +- The node will fully boot after it detects that its peers have come online +- Kubernetes will not start any more pods until the first one boots + +The following combination of deployment settings avoids the problem: + +- Use `podManagementPolicy: "Parallel"` to boot multiple cluster nodes in parallel +- Use `rabbitmq-diagnostics ping` for readiness probe + +To learn more, please consult RabbitMQ documentation guides: + +- [RabbitMQ Clustering guide: Node Restarts](https://www.rabbitmq.com/docs/clustering#restarting) +- [RabbitMQ Clustering guide: Restarts and Readiness Probes](https://www.rabbitmq.com/docs/clustering#restarting-readiness-probes) +- [Recommendations](https://www.rabbitmq.com/docs/cluster-formation#peer-discovery-k8s) for Operator-less (DIY) deployments to Kubernetes + +#### Do Not Force Boot Nodes on a Regular Basis + +Note that forcing nodes to boot is **not a solution** and doing so **can be dangerous**. 
Forced booting is a last resort mechanism in RabbitMQ that helps make remaining cluster nodes recover and rejoin each other after a permanent loss of some of their former peers. In other words, forced booting a node is an emergency event recovery procedure. + +### Known issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container. + +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. + +### Use existing PersistentVolumeClaims + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```console +helm install my-release --set persistence.existingClaim=PVC_NAME oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. 
+ +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. + +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` | +| `global.storageClass` | DEPRECATED: use global.defaultStorageClass instead | `""` | +| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | + +### RabbitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------- | +| `image.registry` | RabbitMQ image registry | `REGISTRY_NAME` | +| `image.repository` | RabbitMQ image repository | `REPOSITORY_NAME/rabbitmq` | +| `image.digest` | RabbitMQ image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + +### Common parameters + +| Name | Description | Value | +| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `servicenameOverride` | String to partially override headless service name | `""` | +| `commonLabels` | Labels to add to all deployed objects 
| `{}` | +| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` | +| `enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | +| `automountServiceAccountToken` | Mount Service Account token in pod | `true` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `dnsPolicy` | DNS Policy for pod | `""` | +| `dnsConfig` | DNS Configuration pod | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.securePassword` | Whether to set the RabbitMQ password securely. This is incompatible with loading external RabbitMQ definitions and 'true' when not setting the auth.password parameter. 
| `true` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (existing secret must contain a value for `rabbitmq-password` key or override with setting auth.existingSecretPasswordKey) | `""` | +| `auth.existingSecretPasswordKey` | Password key to be retrieved from existing secret | `rabbitmq-password` | +| `auth.enableLoopbackUser` | If enabled, the user `auth.username` can only connect from localhost | `false` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key or override with auth.existingSecretErlangKey) | `""` | +| `auth.existingSecretErlangKey` | Erlang cookie key to be retrieved from existing secret | `rabbitmq-erlang-cookie` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? | `verify_peer` | +| `auth.tls.sslOptionsPassword.enabled` | Enable usage of password for private Key | `false` | +| `auth.tls.sslOptionsPassword.existingSecret` | Name of existing Secret containing the sslOptionsPassword | `""` | +| `auth.tls.sslOptionsPassword.key` | Enable Key referring to sslOptionsPassword in Secret specified in auth.tls.sslOptionsPassword.existingSecret | `""` | +| `auth.tls.sslOptionsPassword.password` | Use this string as Password. 
If set, auth.tls.sslOptionsPassword.existingSecret and auth.tls.sslOptionsPassword.key are ignored | `""` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` | +| `auth.tls.overrideCaCertificate` | Existing secret with certificate content be mounted instead of the `ca.crt` coming from caCertificate or existingSecret/existingSecretFullChain. | `""` | +| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65535` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. 
Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `queue_master_locator` | Changes the queue_master_locator setting in the rabbitmq config file | `min-masters` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.name` | RabbitMQ cluster name | `""` | +| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). | `false` | +| `clustering.partitionHandling` | Switch Partition Handling Strategy. Either `autoheal` or `pause_minority` or `pause_if_all_down` or `ignore` | `autoheal` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.file` | Name of the definitions file | `/app/load_definition.json` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `lifecycleHooks` | Overwrite livecycle for the RabbitMQ container(s) to automate configuration before or after startup | `{}` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. 
| `120` | +| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` | +| `containerPorts.amqp` | | `5672` | +| `containerPorts.amqpTls` | | `5671` | +| `containerPorts.dist` | | `25672` | +| `containerPorts.manager` | | `15672` | +| `containerPorts.epmd` | | `4369` | +| `containerPorts.metrics` | | `9419` | +| `initScripts` | Dictionary of init scripts. Evaluated as a template. | `{}` | +| `initScriptsCM` | ConfigMap with the init scripts. Evaluated as a template. | `""` | +| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. | `""` | +| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` | +| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` | +| `tcpListenOptions.backlog` | Maximum size of the unaccepted TCP connections queue | `128` | +| `tcpListenOptions.nodelay` | When set to true, deactivates Nagle's algorithm. Default is true. Highly recommended for most users. | `true` | +| `tcpListenOptions.linger.lingerOn` | Enable Server socket lingering | `true` | +| `tcpListenOptions.linger.timeout` | Server Socket lingering timeout | `0` | +| `tcpListenOptions.keepalive` | When set to true, enables TCP keepalives | `false` | +| `configurationExistingSecret` | Existing secret with the configuration to use as rabbitmq.conf. | `""` | +| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` | +| `extraConfigurationExistingSecret` | Existing secret with the extra configuration to append to `configuration`. 
| `""` |
+| `advancedConfiguration` | Configuration file content: advanced configuration | `""` |
+| `advancedConfigurationExistingSecret` | Existing secret with the advanced configuration file (must contain a key `advanced.config`). | `""` |
+| `featureFlags` | Feature flags that control what features are considered to be enabled or available on all cluster nodes. | `""` |
+| `ldap.enabled` | Enable LDAP support | `false` |
+| `ldap.uri` | LDAP connection string. | `""` |
+| `ldap.servers` | List of LDAP servers hostnames. This is valid only if ldap.uri is not set | `[]` |
+| `ldap.port` | LDAP servers port. This is valid only if ldap.uri is not set | `""` |
+| `ldap.userDnPattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind. | `""` |
+| `ldap.binddn` | DN of the account used to search in the LDAP server. | `""` |
+| `ldap.bindpw` | Password for binddn account. | `""` |
+| `ldap.basedn` | Base DN path where binddn account will search for the users. | `""` |
+| `ldap.uidField` | Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration | `""` |
+| `ldap.authorisationEnabled` | Enable LDAP authorisation. Please set 'advancedConfiguration' with tag, topic, resources and vhost mappings | `false` |
+| `ldap.tls.enabled` | Enabled TLS configuration. | `false` |
+| `ldap.tls.startTls` | Use STARTTLS instead of LDAPS. | `false` |
+| `ldap.tls.skipVerify` | Skip any SSL verification (hostnames or certificates) | `false` |
+| `ldap.tls.verify` | Verify connection. Valid values are 'verify_peer' or 'verify_none' | `verify_peer` |
+| `ldap.tls.certificatesMountPath` | Where LDAP certificates are mounted. 
| `/opt/bitnami/rabbitmq/ldap/certs` |
+| `ldap.tls.certificatesSecret` | Secret with LDAP certificates. | `""` |
+| `ldap.tls.CAFilename` | CA certificate filename. Should match with the CA entry key in the ldap.tls.certificatesSecret. | `""` |
+| `ldap.tls.certFilename` | Client certificate filename to authenticate against the LDAP server. Should match with the certificate entry key in the ldap.tls.certificatesSecret. | `""` |
+| `ldap.tls.certKeyFilename` | Client Key filename to authenticate against the LDAP server. Should match with the certificate entry key in the ldap.tls.certificatesSecret. | `""` |
+| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` |
+| `extraVolumes` | Optionally specify extra list of additional volumes. | `[]` |
+| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` |
+| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with the release name prepended. | `false` |
+
+### Statefulset parameters
+
+| Name | Description | Value |
+| --------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------- |
+| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` |
+| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` |
+| `podManagementPolicy` | Pod management policy | `OrderedReady` |
+| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` |
+| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` |
+| `updateStrategy.type` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` |
+| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` |
+| `statefulsetAnnotations` | RabbitMQ statefulset annotations. 
Evaluated as a template | `{}` | +| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template | `[]` |
+| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` |
+| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
+| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
+| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
+| `podSecurityContext.fsGroup` | Set RabbitMQ pod's Security Context fsGroup | `1001` |
+| `containerSecurityContext.enabled` | Enabled RabbitMQ containers' Security Context | `true` |
+| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
+| `containerSecurityContext.runAsUser` | Set RabbitMQ containers' Security Context runAsUser | `1001` |
+| `containerSecurityContext.runAsGroup` | Set RabbitMQ containers' Security Context runAsGroup | `1001` |
+| `containerSecurityContext.runAsNonRoot` | Set RabbitMQ container's Security Context runAsNonRoot | `true` |
+| `containerSecurityContext.allowPrivilegeEscalation` | Set container's privilege escalation | `false` |
+| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` |
+| `containerSecurityContext.capabilities.drop` | Set container's Security Context capabilities to drop | `["ALL"]` |
+| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
+| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
| `micro` | +| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `30` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `20` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Define a custom startup probe | `{}` | +| `initContainers` | Add init containers to the RabbitMQ pod | `[]` | +| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | +| 
`pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty. | `""` | + +### RBAC parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------ | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `serviceAccount.automountServiceAccountToken` | Auto-mount the service account token in the pod | `false` | +| `serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | +| `rbac.create` | Whether RBAC rules should be created | `true` | +| `rbac.rules` | Custom RBAC rules | `[]` | + +### Persistence parameters + +| Name | Description | Value | +| -------------------------------------------------- | ------------------------------------------------------------------------------ | ---------------------------------------- | +| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` | +| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `persistence.accessModes` | PVC Access Modes for RabbitMQ data volume | `["ReadWriteOnce"]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` | +| `persistence.mountPath` | The path the volume will be mounted at | `/opt/bitnami/rabbitmq/.rabbitmq/mnesia` | +| `persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` | +| `persistence.annotations` | Persistence 
annotations. Evaluated as a template | `{}` | +| `persistence.labels` | Persistence labels. Evaluated as a template | `{}` | +| `persistentVolumeClaimRetentionPolicy.enabled` | Enable Persistent volume retention policy for rabbitmq Statefulset | `false` | +| `persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` | +| `persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` | + +### Exposure parameters + +| Name | Description | Value | +| --------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
| `true` | +| `service.distPortEnabled` | Erlang distribution server port | `true` | +| `service.managerPortEnabled` | RabbitMQ Manager port | `true` | +| `service.epmdPortEnabled` | RabbitMQ EPMD Discovery service port | `true` | +| `service.ports.amqp` | Amqp service port | `5672` | +| `service.ports.amqpTls` | Amqp TLS service port | `5671` | +| `service.ports.dist` | Erlang distribution service port | `25672` | +| `service.ports.manager` | RabbitMQ Manager service port | `15672` | +| `service.ports.metrics` | RabbitMQ Prometheues metrics service port | `9419` | +| `service.ports.epmd` | EPMD Discovery service port | `4369` | +| `service.portNames.amqp` | Amqp service port name | `amqp` | +| `service.portNames.amqpTls` | Amqp TLS service port name | `amqp-tls` | +| `service.portNames.dist` | Erlang distribution service port name | `dist` | +| `service.portNames.manager` | RabbitMQ Manager service port name | `http-stats` | +| `service.portNames.metrics` | RabbitMQ Prometheues metrics service port name | `metrics` | +| `service.portNames.epmd` | EPMD Discovery service port name | `epmd` | +| `service.nodePorts.amqp` | Node port for Ampq | `""` | +| `service.nodePorts.amqpTls` | Node port for Ampq TLS | `""` | +| `service.nodePorts.dist` | Node port for Erlang distribution | `""` | +| `service.nodePorts.manager` | Node port for RabbitMQ Manager | `""` | +| `service.nodePorts.epmd` | Node port for EPMD Discovery | `""` | +| `service.nodePorts.metrics` | Node port for RabbitMQ Prometheues metrics | `""` | +| `service.extraPorts` | Extra ports to expose in the service | `[]` | +| `service.extraPortsHeadless` | Extra ports to expose in the headless service | `[]` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` | +| `service.allocateLoadBalancerNodePorts` | Whether to allocate node ports when service type is LoadBalancer | `true` | +| `service.externalIPs` | Set the ExternalIPs | `[]` | +| 
`service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.loadBalancerClass` | Set the LoadBalancerClass | `""` | +| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` | +| `service.clusterIP` | Kubernetes service Cluster IP | `""` | +| `service.labels` | Service labels. Evaluated as a template | `{}` | +| `service.annotations` | Service annotations. Evaluated as a template | `{}` | +| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` | +| `service.headless.annotations` | Annotations for the headless service. | `{}` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraRules` | The list of additional rules to be added to this ingress record. 
Evaluated as a template | `[]` |
+| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` |
+| `ingress.secrets` | Custom TLS certificates as secrets | `[]` |
+| `ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` |
+| `ingress.existingSecret` | Existing secret containing your own TLS certificate for this ingress record. | `""` |
+| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` |
+| `networkPolicy.kubeAPIServerPorts` | List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) | `[]` |
+| `networkPolicy.allowExternal` | Don't require server label for connections | `true` |
+| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` |
+| `networkPolicy.addExternalClientAccess` | Allow access from pods with client label set to "true". Ignored if `networkPolicy.allowExternal` is true. | `true` |
+| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` |
+| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` |
+| `networkPolicy.ingressPodMatchLabels` | Labels to match to allow traffic from other pods. Ignored if `networkPolicy.allowExternal` is true. | `{}` |
+| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. | `{}` |
+| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. 
| `{}` | + +### Metrics Parameters + +| Name | Description | Value | +| ------------------------------------------ | -------------------------------------------------------------------------------------- | --------------------- | +| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` | +| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` | +| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion. 
| `[]` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` | +| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrap metrics | `""` | +| `metrics.serviceMonitor.params` | Define the HTTP URL parameters used by ServiceMonitor | `{}` | +| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` | +| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `metrics.serviceMonitor.annotations` | Extra annotations for the ServiceMonitor | `{}` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` | +| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. 
| `[]` | + +### Init Container Parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
| `nano` | +| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + +The above parameters map to the env variables defined in [bitnami/rabbitmq](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq). For more information please refer to the [bitnami/rabbitmq](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \ + oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. +> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq/values.yaml) + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart: + +```console +helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE] +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +| Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes. + +### To 14.0.0 + +This major version changes the default RabbitMQ image from 3.12.x to 3.13.x. Follow the [official instructions](https://www.rabbitmq.com/upgrade.html) to upgrade from 3.12 to 3.13. 
+ +### To 13.0.0 + +This major bump changes the following security defaults: + +- `runAsGroup` is changed from `0` to `1001` +- `readOnlyRootFilesystem` is set to `true` +- `resourcesPreset` is changed from `none` to the minimum size working in our test suites (NOTE: `resourcesPreset` is not meant for production usage, but `resources` adapted to your use case). +- `global.compatibility.openshift.adaptSecurityContext` is changed from `disabled` to `auto`. + +This could potentially break any customization or init scripts used in your deployment. If this is the case, change the default values to the previous ones. + +### To 12.10.0 + +This version adds NetworkPolicy objects by default. Its default configuration is setting open `egress` (this can be changed by setting `networkPolicy.allowExternalEgress=false`) and limited `ingress` to the default container ports. If you have any extra port exposed you may need to set the `networkPolicy.extraIngress` value. In the example below an extra port is exposed using `extraContainerPorts` and access is allowed using `networkPolicy.extraIngress`: + +```yaml + extraContainerPorts: + - name: "mqtts" + protocol: "TCP" + containerPort: 8883 + networkPolicy: + extraIngress: + - ports: + - protocol: "TCP" + containerPort: 8883 + port: 8883 +``` + +You can revert this behavior by setting `networkPolicy.enabled=false`. + +### To 11.0.0 + +This major version changes the default RabbitMQ image from 3.10.x to 3.11.x. Follow the [official instructions](https://www.rabbitmq.com/upgrade.html) to upgrade from 3.10 to 3.11. + +### To 10.0.0 + +This major version changes the default RabbitMQ image from 3.9.x to 3.10.x. Follow the [official instructions](https://www.rabbitmq.com/upgrade.html) to upgrade from 3.9 to 3.10. + +### To 9.0.0 + +This major release renames several values in this chart and adds missing features, in order to be aligned with the rest of the assets in the Bitnami charts repository. 
+ + .dist + .manager + .metrics + .epmd + +- `service.port` has been renamed as `service.ports.amqp`. +- `service.portName` has been renamed as `service.portNames.amqp`. +- `service.nodePort` has been renamed as `service.nodePorts.amqp`. +- `service.tlsPort` has been renamed as `service.ports.amqpTls`. +- `service.tlsPortName` has been renamed as `service.portNames.amqpTls`. +- `service.tlsNodePort` has been renamed as `service.nodePorts.amqpTls`. +- `service.epmdPortName` has been renamed as `service.portNames.epmd`. +- `service.epmdNodePort` has been renamed as `service.nodePorts.epmd`. +- `service.distPort` has been renamed as `service.ports.dist`. +- `service.distPortName` has been renamed as `service.portNames.dist`. +- `service.distNodePort` has been renamed as `service.nodePorts.dist`. +- `service.managerPort` has been renamed as `service.ports.manager`. +- `service.managerPortName` has been renamed as `service.portNames.manager`. +- `service.managerNodePort` has been renamed as `service.nodePorts.manager`. +- `service.metricsPort` has been renamed as `service.ports.metrics`. +- `service.metricsPortName` has been renamed as `service.portNames.metrics`. +- `service.metricsNodePort` has been renamed as `service.nodePorts.metrics`. +- `persistence.volumes` has been removed, as it duplicates the parameter `extraVolumes`. +- `ingress.certManager` has been removed. +- `metrics.serviceMonitor.relabellings` has been replaced with `metrics.serviceMonitor.relabelings`, and it sets the field `relabelings` instead of `metricRelabelings`. +- `metrics.serviceMonitor.additionalLabels` has been renamed as `metrics.serviceMonitor.labels` +- `updateStrategyType` has been removed, use the field `updateStrategy` instead, which is interpreted as a template. +- The content of `podSecurityContext` and `containerSecurityContext` have been modified. 
+ +- The behavior of VolumePermissions has been modified to not change ownership of '.snapshot' and 'lost+found' +- Introduced the values `ContainerPorts.*`, separating the service and container ports configuration. + +### To 8.21.0 + +This new version of the chart bumps the RabbitMQ version to `3.9.1`. It is considered a minor release, and no breaking changes are expected. Additionally, RabbitMQ `3.9.X` nodes can run alongside `3.8.X` nodes. + +See the [Upgrading guide](https://www.rabbitmq.com/upgrade.html) and the [RabbitMQ change log](https://www.rabbitmq.com/changelog.html) for further documentation. + +### To 8.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +### To 7.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - `replicas` is renamed to `replicaCount`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - Authentication parameters were reorganized under the `auth.*` parameter: + - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively. + - `rabbitmq.tls.*` parameters are now under `auth.tls.*`. + - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`. + - `rabbitmq.rabbitmqClusterNodeName` is deprecated. + - `rabbitmq.setUlimitNofiles` is deprecated. + - `forceBoot.enabled` is renamed to `clustering.forceBoot`. + - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`. + - `metrics.port` is renamed to `service.metricsPort`. 
+ - `service.extraContainerPorts` is renamed to `extraContainerPorts`. + - `service.nodeTlsPort` is renamed to `service.tlsNodePort`. + - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`. + - `rbacEnabled` -> deprecated in favor of `rbac.create`. + - New parameters: `serviceAccount.create`, and `serviceAccount.name`. + - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`. +- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices. +- Initialization logic now relies on the container. +- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. +- The layout of the persistent volumes has changed (if using persistence). Action is required if preserving data through the upgrade is desired: + - The data has moved from `mnesia/` within the persistent volume to the root of the persistent volume + - The `config/` and `schema/` directories within the persistent volume are no longer used + - An init container can be used to move and clean up the persistent volumes. An example can be found [here](https://github.com/bitnami/charts/issues/10913#issuecomment-1169619513). + - Alternately the value `persistence.subPath` can be overridden to be `mnesia` so that the directory layout is consistent with what it was previously. + - Note however that this will leave the unused `config/` and `schema/` directories within the persistent volume forever. + +Consequences: + +- Backwards compatibility is not guaranteed. +- Compatibility with non Bitnami images is not guaranteed anymore. 
+ +### To 6.0.0 + +This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as it is already the case from upgrading from 5.X to 5.Y. + +### To 5.0.0 + +This major release changes the clustering method from `ip` to `hostname`. +This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change. + +> IMPORTANT: Note that if you upgrade from a previous version you will lose your data. + +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq: + +```console +kubectl delete statefulset rabbitmq --cascade=false +``` + +## Bitnami Kubernetes Documentation + +Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). You can find there the following resources: + +- [Documentation for RabbitMQ Helm chart](https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq) +- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/) +- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/) +- [Kubernetes Developer guides](https://docs.vmware.com/en/VMware-Tanzu-Application-Catalog/services/tutorials/GUID-index.html) + +## License + +Copyright © 2024 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/NOTES.txt b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/NOTES.txt new file mode 100644 index 000000000..727775f21 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/NOTES.txt @@ -0,0 +1,156 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.ports.amqp .Values.service.ports.amqpTls -}} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh + +{{- else }} + +Credentials: + +{{- if not .Values.loadDefinition.enabled }} + echo "Username : {{ .Values.auth.username }}" + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 -d)" +{{- end }} + echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 -d)" + +Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid. +This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading. +More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases. + +RabbitMQ can be accessed within the cluster on port {{ $servicePort }} at {{ include "common.names.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +To access for outside the cluster, perform the following steps: + +{{- if .Values.ingress.enabled }} +{{- if contains "NodePort" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='amqp')].nodePort}" services {{ include "common.names.fullname" . }}) + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + +1. Create a port-forward to the AMQP port: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ $servicePort }}:{{ $servicePort }} & + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + +{{- end }} + +2. Access RabbitMQ using using the obtained URL. + +To Access the RabbitMQ Management interface: + +1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP: + + export CLUSTER_IP=$(minikube ip) # On Minikube. 
Use: `kubectl cluster-info` on others K8s clusters + echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/" + echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts + +2. Open a browser and access RabbitMQ Management using the obtained URL. + +{{- else }} +{{- if contains "NodePort" .Values.service.type }} + +Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='amqp')].nodePort}" services {{ include "common.names.fullname" . }}) + export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='http-stats')].nodePort}" services {{ include "common.names.fullname" . }}) + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$NODE_IP:$NODE_PORT_STATS/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . 
}}{{ end }}" }}") + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$SERVICE_IP:{{ .Values.service.ports.manager }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://127.0.0.1:{{ $servicePort }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ $servicePort }}:{{ $servicePort }} + +To Access the RabbitMQ Management interface: + + echo "URL : http://127.0.0.1:{{ .Values.service.ports.manager }}/" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ .Values.service.ports.manager }}:{{ .Values.service.ports.manager }} + +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ .Values.service.ports.metrics }}:{{ .Values.service.ports.metrics }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.ports.metrics }}/metrics" + +Then, open the obtained URL in a browser. 
+ +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} + +{{- end }} +{{- include "common.warnings.resources" (dict "sections" (list "" "volumePermissions") "context" $) }} +{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.volumePermissions.image) "context" $) }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/_helpers.tpl b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/_helpers.tpl new file mode 100644 index 000000000..69f14a92d --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,319 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.renderPullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "context" $) }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get RabbitMQ password secret name. 
+*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from RabbitMQ secret. +*/}} +{{- define "rabbitmq.secretPasswordKey" -}} + {{- if and .Values.auth.existingPasswordSecret .Values.auth.existingSecretPasswordKey -}} + {{- printf "%s" (tpl .Values.auth.existingSecretPasswordKey $) -}} + {{- else -}} + {{- printf "rabbitmq-password" -}} + {{- end -}} +{{- end -}} + +{{/* +Return RabbitMQ password +*/}} +{{- define "rabbitmq.password" -}} + {{- if not (empty .Values.auth.password) -}} + {{- .Values.auth.password -}} + {{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" (include "common.names.namespace" .) "Name" (include "rabbitmq.secretPasswordName" .) "Length" 16 "Key" (include "rabbitmq.secretPasswordKey" .)) -}} + {{- end -}} +{{- end }} + +{{/* +Get the erlang secret. +*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.auth.existingErlangSecret -}} + {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang cookie key to be retrieved from RabbitMQ secret. +*/}} +{{- define "rabbitmq.secretErlangKey" -}} + {{- if and .Values.auth.existingErlangSecret .Values.auth.existingSecretErlangKey -}} + {{- printf "%s" (tpl .Values.auth.existingSecretErlangKey $) -}} + {{- else -}} + {{- printf "rabbitmq-erlang-cookie" -}} + {{- end -}} +{{- end -}} + +{{/* +Return RabbitMQ erlang cookie secret +*/}} +{{- define "rabbitmq.erlangCookie" -}} + {{- if not (empty .Values.auth.erlangCookie) -}} + {{- .Values.auth.erlangCookie -}} + {{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" (include "common.names.namespace" .) 
"Name" (include "rabbitmq.secretErlangName" .) "Length" 32 "Key" (include "rabbitmq.secretErlangKey" .)) -}} + {{- end -}} +{{- end }} + +{{/* +Get the TLS secret. +*/}} +{{- define "rabbitmq.tlsSecretName" -}} + {{- if .Values.auth.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}} + {{- else -}} + {{- printf "%s-certs" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "rabbitmq.createTlsSecret" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.plugins -}} +{{- if .Values.extraPlugins -}} +{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}} +{{- end -}} +{{- if .Values.metrics.enabled -}} +{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}} +{{- end -}} +{{- printf "%s" $plugins | replace " " ", " -}} +{{- end -}} + +{{/* +Return the number of bytes given a value +following a base 2 or base 10 number system. +Input can be: b | B | k | K | m | M | g | G | Ki | Mi | Gi +Or number without suffix (then the number gets interpreted as bytes) +Usage: +{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }} +*/}} +{{- define "rabbitmq.toBytes" -}} + {{- $si := . -}} + {{- if not (typeIs "string" . 
) -}} + {{- $si = int64 $si | toString -}} + {{- end -}} + {{- $bytes := 0 -}} + {{- if or (hasSuffix "B" $si) (hasSuffix "b" $si) -}} + {{- $bytes = $si | trimSuffix "B" | trimSuffix "b" | float64 | floor -}} + {{- else if or (hasSuffix "K" $si) (hasSuffix "k" $si) -}} + {{- $raw := $si | trimSuffix "K" | trimSuffix "k" | float64 -}} + {{- $bytes = mulf $raw (mul 1000) | floor -}} + {{- else if or (hasSuffix "M" $si) (hasSuffix "m" $si) -}} + {{- $raw := $si | trimSuffix "M" | trimSuffix "m" | float64 -}} + {{- $bytes = mulf $raw (mul 1000 1000) | floor -}} + {{- else if or (hasSuffix "G" $si) (hasSuffix "g" $si) -}} + {{- $raw := $si | trimSuffix "G" | trimSuffix "g" | float64 -}} + {{- $bytes = mulf $raw (mul 1000 1000 1000) | floor -}} + {{- else if hasSuffix "Ki" $si -}} + {{- $raw := $si | trimSuffix "Ki" | float64 -}} + {{- $bytes = mulf $raw (mul 1024) | floor -}} + {{- else if hasSuffix "Mi" $si -}} + {{- $raw := $si | trimSuffix "Mi" | float64 -}} + {{- $bytes = mulf $raw (mul 1024 1024) | floor -}} + {{- else if hasSuffix "Gi" $si -}} + {{- $raw := $si | trimSuffix "Gi" | float64 -}} + {{- $bytes = mulf $raw (mul 1024 1024 1024) | floor -}} + {{- else if (mustRegexMatch "^[0-9]+$" $si) -}} + {{- $bytes = $si -}} + {{- else -}} + {{- printf "\n%s is invalid SI quantity\nSuffixes can be: b | B | k | K | m | M | g | G | Ki | Mi | Gi or without any Suffixes" $si | fail -}} + {{- end -}} + {{- $bytes | int64 -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "rabbitmq.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - LDAP support +*/}} +{{- define "rabbitmq.validateValues.ldap" -}} +{{- if .Values.ldap.enabled }} +{{- $serversListLength := len .Values.ldap.servers }} +{{- $userDnPattern := coalesce .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} +{{- if or (and (not (gt $serversListLength 0)) (empty .Values.ldap.uri)) (and (not $userDnPattern) (not .Values.ldap.basedn)) }} +rabbitmq: LDAP + Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers" or "ldap.uri" are mandatory + to configure the connection and "ldap.userDnPattern" or "ldap.basedn" are necessary to lookup the users. Please provide them: + $ helm install {{ .Release.Name }} oci://registry-1.docker.io/bitnamicharts/rabbitmq \ + --set ldap.enabled=true \ + --set ldap.servers[0]=my-ldap-server" \ + --set ldap.port="389" \ + --set ldap.userDnPattern="cn=${username},dc=example,dc=org" +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - Memory high watermark +*/}} +{{- define "rabbitmq.validateValues.memoryHighWatermark" -}} +{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }} +rabbitmq: memoryHighWatermark.type + Invalid Memory high watermark type. Valid values are "absolute" and + "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx") +{{- else if and .Values.memoryHighWatermark.enabled (not (dig "limits" "memory" "" .Values.resources)) }} +rabbitmq: memoryHighWatermark + You enabled configuring memory high watermark using a relative limit. However, + no memory limits were defined at POD level. 
Define your POD limits as shown below: + + $ helm install {{ .Release.Name }} oci://registry-1.docker.io/bitnamicharts/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="relative" \ + --set memoryHighWatermark.value="0.4" \ + --set resources.limits.memory="2Gi" + + Alternatively, use an absolute value for the memory high watermark: + + $ helm install {{ .Release.Name }} oci://registry-1.docker.io/bitnamicharts/rabbitmq \ + --set memoryHighWatermark.enabled=true \ + --set memoryHighWatermark.type="absolute" \ + --set memoryHighWatermark.value="512MB" +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - TLS configuration for Ingress +*/}} +{{- define "rabbitmq.validateValues.ingress.tls" -}} +{{- if and .Values.ingress.enabled .Values.ingress.tls (not (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations ))) (not .Values.ingress.selfSigned) (not .Values.ingress.existingSecret) (empty .Values.ingress.extraTls) }} +rabbitmq: ingress.tls + You enabled the TLS configuration for the default ingress hostname but + you did not enable any of the available mechanisms to create the TLS secret + to be used by the Ingress Controller. + Please use any of these alternatives: + - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates. + - Use the `ingress.existingSecret` to provide your custom TLS certificates. 
+ - Rely on cert-manager to create it by setting the corresponding annotations + - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` +{{- end -}} +{{- end -}} + +{{/* +Validate values of RabbitMQ - Auth TLS enabled +*/}} +{{- define "rabbitmq.validateValues.auth.tls" -}} +{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }} +rabbitmq: auth.tls + You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret. + Please use any of these alternatives: + - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret` + - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`. + - Enable auto-generated certificates using `auth.tls.autoGenerated`. +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts volume name. +*/}} +{{- define "rabbitmq.initScripts" -}} +{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | trimAll "\"" | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* +Get the extraConfigurationExistingSecret secret. 
+*/}} +{{- define "rabbitmq.extraConfiguration" -}} +{{- if not (empty .Values.extraConfigurationExistingSecret) -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" .Values.extraConfigurationExistingSecret "Length" 10 "Key" "extraConfiguration") -}} +{{- else -}} + {{- tpl .Values.extraConfiguration . -}} +{{- end -}} +{{- end -}} + +{{/* +Get the TLS.sslOptions.Password secret. +*/}} +{{- define "rabbitmq.tlsSslOptionsPassword" -}} +{{- if not (empty .Values.auth.tls.sslOptionsPassword.password) -}} + {{- .Values.auth.tls.sslOptionsPassword.password -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" .Values.auth.tls.sslOptionsPassword.existingSecret "Length" 10 "Key" .Values.auth.tls.sslOptionsPassword.key) -}} +{{- end -}} +{{- end -}} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/config-secret.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/config-secret.yaml new file mode 100644 index 000000000..9fa0a6f70 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/config-secret.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if or (empty .Values.configurationExistingSecret) .Values.advancedConfiguration }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-config" (include "common.names.fullname" .) 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if empty .Values.configurationExistingSecret }} + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | b64enc | nindent 4 }} + {{- end }} + {{- if .Values.advancedConfiguration }} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | b64enc | nindent 4 }} + {{- end }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/extra-list.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/extra-list.yaml new file mode 100644 index 000000000..329f5c653 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/ingress-tls-secrets.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/ingress-tls-secrets.yaml new file mode 100644 index 000000000..37570a079 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/ingress-tls-secrets.yaml @@ -0,0 +1,42 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned (not .Values.ingress.existingSecret) }} +{{- $secretName := printf "%s-tls" .Values.ingress.hostname }} +{{- $ca := genCA "rabbitmq-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/ingress.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/ingress.yaml new file mode 100644 index 000000000..1c44f36c6 --- /dev/null +++ 
b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/ingress.yaml @@ -0,0 +1,58 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingress.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" .Values.service.portNames.manager "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" $.Values.service.portNames.manager "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned .Values.ingress.existingSecret)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned .Values.ingress.existingSecret) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ default (printf "%s-tls" .Values.ingress.hostname | trunc 63 | trimSuffix "-") .Values.ingress.existingSecret }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/init-configmap.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/init-configmap.yaml new file mode 100644 index 000000000..b294f47ac --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/init-configmap.yaml @@ -0,0 +1,17 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.initScripts }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" ( dict "value" .Values.initScripts "context" $ ) | nindent 4 }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/networkpolicy.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 000000000..a2299167a --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,91 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . 
) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + policyTypes: + - Ingress + - Egress + {{- if .Values.networkPolicy.allowExternalEgress }} + egress: + - {} + {{- else }} + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + {{- if .Values.rbac.create }} + # Allow access to kube-apiserver + {{- range $port := .Values.networkPolicy.kubeAPIServerPorts }} + - port: {{ $port }} + {{- end }} + {{- end }} + # Allow internal communications between nodes + - ports: + - port: {{ .Values.service.ports.epmd }} + - port: {{ .Values.service.ports.amqp }} + - port: {{ .Values.service.ports.amqpTls }} + - port: {{ .Values.service.ports.dist }} + - port: {{ .Values.service.ports.manager }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + # Allow inbound connections to RabbitMQ + - ports: + - port: {{ .Values.containerPorts.epmd }} + - port: {{ .Values.containerPorts.amqp }} + - port: {{ .Values.containerPorts.amqpTls }} + - port: {{ .Values.containerPorts.dist }} + - port: {{ .Values.containerPorts.manager }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.containerPorts.metrics }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.addExternalClientAccess }} + - podSelector: + matchLabels: + {{ template 
"common.names.fullname" . }}-client: "true" + {{- end }} + {{- if .Values.networkPolicy.ingressPodMatchLabels }} + - podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressPodMatchLabels "context" $ ) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressNSMatchLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.ingressNSPodMatchLabels "context" $ ) | nindent 14 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/pdb.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/pdb.yaml new file mode 100644 index 000000000..277943d61 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/pdb.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if or .Values.pdb.maxUnavailable (not .Values.pdb.minAvailable) }} + maxUnavailable: {{ .Values.pdb.maxUnavailable | default 1 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/prometheusrule.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 000000000..c201cde0e --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "common.names.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/role.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/role.yaml new file mode 100644 index 000000000..371325a1e --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/role.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/rolebinding.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 000000000..be050a09d --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,22 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) 
}} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/secrets.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/secrets.yaml new file mode 100644 index 000000000..71e437082 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/secrets.yaml @@ -0,0 +1,61 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $host := printf "%s.%s.svc.%s" (include "common.names.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain }} +{{- $port := print .Values.service.ports.amqp }} +{{- $user := print .Values.auth.username }} +{{- $password := include "rabbitmq.password" . }} +{{- $erlangCookie := include "rabbitmq.erlangCookie" . }} +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if (not .Values.auth.existingPasswordSecret) }} + rabbitmq-password: {{ print $password | b64enc | quote }} + {{- end }} + {{- if (not .Values.auth.existingErlangSecret ) }} + rabbitmq-erlang-cookie: {{ print $erlangCookie | b64enc | quote }} + {{- end }} +{{- end }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ ternary (printf "%s-%s" $.Release.Name $key) $key $.Values.extraSecretsPrependReleaseName }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" 
$.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }} +{{- end }} +{{- if .Values.serviceBindings.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-svcbind + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: servicebinding.io/rabbitmq +data: + provider: {{ print "bitnami" | b64enc | quote }} + type: {{ print "rabbitmq" | b64enc | quote }} + host: {{ print $host | b64enc | quote }} + port: {{ print $port | b64enc | quote }} + username: {{ print $user | b64enc | quote }} + password: {{ print $password | b64enc | quote }} + uri: {{ printf "amqp://%s:%s@%s:%s" $user $password $host $port | b64enc | quote }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/serviceaccount.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 000000000..8bdc88101 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +secrets: + - name: {{ template "rabbitmq.secretPasswordName" . }} +{{- end }} + diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/servicemonitor.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 000000000..a05e980e9 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,56 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel | quote }} + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.params }} + params: {{ toYaml .Values.metrics.serviceMonitor.params | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . 
| quote }} + {{- if .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.podTargetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.targetLabels "context" $) | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/statefulset.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/statefulset.yaml new file mode 100644 index 000000000..b0295184a --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,515 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.statefulsetLabels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.statefulsetAnnotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.statefulsetAnnotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.servicenameOverride) }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/config-secret.yaml") . | sha256sum }} + {{- if (include "rabbitmq.createTlsSecret" . ) }} + checksum/tls-config: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.initScripts }} + checksum/initScripts: {{ include (print $.Template.BasePath "/init-configmap.yaml") . 
| sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.dnsConfig "context" .) | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "{{ .Values.persistence.mountPath }}" + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.persistence.mountPath }}" + {{- end }} + find "{{ .Values.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- else if ne .Values.volumePermissions.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.volumePermissions.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- end }} + - name: prepare-plugins-dir + image: {{ template "rabbitmq.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- else if ne .Values.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + #!/bin/bash + + . /opt/bitnami/scripts/liblog.sh + + info "Copying plugins dir to empty dir" + # In order to not break the possibility of installing custom plugins, we need + # to make the plugins directory writable, so we need to copy it to an empty dir volume + cp -r --preserve=mode /opt/bitnami/rabbitmq/plugins/ /emptydir/app-plugins-dir + volumeMounts: + - name: empty-dir + mountPath: /emptydir + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- else }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl stop_app + fi + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- $svcName := printf "%s-%s" (include "common.names.fullname" .) 
(default "headless" .Values.servicenameOverride) }} + {{- if .Values.featureFlags }} + - name: RABBITMQ_FEATURE_FLAGS + value: {{ .Values.featureFlags }} + {{- end }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).{{ $svcName }}.$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "{{ .Values.persistence.mountPath }}/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + {{- if .Values.logs }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + {{- end }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . }} + key: {{ template "rabbitmq.secretErlangKey" . 
}} + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + - name: RABBITMQ_CLUSTER_REBALANCE + value: "true" + {{- end }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: {{ ternary "yes" "no" .Values.loadDefinition.enabled | quote }} + - name: RABBITMQ_DEFINITIONS_FILE + value: {{ .Values.loadDefinition.file | quote }} + - name: RABBITMQ_SECURE_PASSWORD + value: {{ ternary "yes" "no" (or .Values.auth.securePassword (not .Values.auth.password)) | quote }} + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: {{ template "rabbitmq.secretPasswordKey" . }} + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . | quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: amqp + containerPort: {{ .Values.containerPorts.amqp }} + - name: dist + containerPort: {{ .Values.containerPorts.dist }} + - name: stats + containerPort: {{ .Values.containerPorts.manager }} + - name: epmd + containerPort: {{ .Values.containerPorts.epmd }} + - name: metrics + containerPort: {{ .Values.containerPorts.metrics }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-tls + containerPort: {{ .Values.containerPorts.amqpTls }} + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- include "common.tplvalues.render" (dict "value" 
.Values.extraContainerPorts "context" $) | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - sh + - -ec + {{- if or (.Values.loadDefinition.enabled) (not (contains "rabbitmq_management" .Values.plugins )) }} + - rabbitmq-diagnostics -q ping + {{- else }} + - curl -f --user {{ .Values.auth.username }}:$RABBITMQ_PASSWORD 127.0.0.1:{{ .Values.containerPorts.manager }}/api/health/checks/virtual-hosts + {{- end }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - sh + - -ec + {{- if or (.Values.loadDefinition.enabled) (not (contains "rabbitmq_management" .Values.plugins )) }} + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + {{- else }} + - curl -f --user {{ .Values.auth.username }}:$RABBITMQ_PASSWORD 127.0.0.1:{{ .Values.containerPorts.manager }}/api/health/checks/local-alarms + {{- end }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: {{ternary "amqp-tls" "amqp" 
.Values.auth.tls.enabled }} + {{- end }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- else if ne .Values.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }} + {{- end }} + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: empty-dir + mountPath: /tmp + subPath: tmp-dir + - name: empty-dir + mountPath: /opt/bitnami/rabbitmq/etc/rabbitmq + subPath: app-conf-dir + - name: empty-dir + mountPath: /opt/bitnami/rabbitmq/var/lib/rabbitmq + subPath: app-tmp-dir + - name: empty-dir + mountPath: /opt/bitnami/rabbitmq/.rabbitmq/ + subPath: app-erlang-cookie + - name: empty-dir + mountPath: /opt/bitnami/rabbitmq/var/log/rabbitmq + subPath: app-logs-dir + - name: empty-dir + mountPath: /opt/bitnami/rabbitmq/plugins + subPath: app-plugins-dir + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + {{- if and .Values.ldap.tls.enabled .Values.ldap.tls.certificatesSecret }} + - name: ldap-certs + mountPath: {{ .Values.ldap.tls.certificatesMountPath }} + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: 
/docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: empty-dir + emptyDir: + sizeLimit: 1Gi + {{- if .Values.auth.tls.enabled }} + - name: certs + projected: + sources: + - secret: + name: {{ template "rabbitmq.tlsSecretName" . }} + items: + {{- if not .Values.auth.tls.overrideCaCertificate }} + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + {{- end }} + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- if .Values.auth.tls.overrideCaCertificate }} + - secret: + name: {{ .Values.auth.tls.overrideCaCertificate }} + items: + - key: ca.crt + path: ca_certificate.pem + {{- end }} + {{- end }} + {{- if and .Values.ldap.tls.enabled .Values.ldap.tls.certificatesSecret }} + - name: ldap-certs + secret: + secretName: {{ .Values.ldap.tls.certificatesSecret }} + {{- end }} + - name: configuration + projected: + sources: + {{- if or (and (empty .Values.configurationExistingSecret) .Values.configuration) (and (not .Values.advancedConfigurationExistingSecret) .Values.advancedConfiguration) }} + - secret: + name: {{ printf "%s-config" (include "common.names.fullname" .) }} + {{- end }} + {{- if and .Values.advancedConfigurationExistingSecret (not .Values.advancedConfiguration) }} + - secret: + name: {{ tpl .Values.advancedConfigurationExistingSecret . | quote }} + {{- end }} + {{- if not (empty .Values.configurationExistingSecret) }} + - secret: + name: {{ tpl .Values.configurationExistingSecret . | quote }} + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "rabbitmq.initScripts" . 
}} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ tpl .Values.initScriptsCM . | quote }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ tpl .Values.initScriptsSecret . | quote }} + defaultMode: 0755 + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + {{- if .Values.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.persistentVolumeClaimRetentionPolicy.whenScaled }} + {{- end }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: data + {{- $claimLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.persistence.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" $claimLabels "context" $ ) | nindent 10 }} + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/svc-headless.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 000000000..23e3ba289 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.servicenameOverride) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.service.annotationsHeadless .Values.commonAnnotations .Values.service.headless.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.annotations .Values.service.annotationsHeadless .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.portNames.epmd }} + port: {{ .Values.service.ports.epmd }} + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portNames.amqp }} + port: {{ .Values.service.ports.amqp }} + targetPort: amqp + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.portNames.amqpTls }} + port: {{ .Values.service.ports.amqpTls }} + targetPort: amqp-tls + {{- end }} + - name: {{ .Values.service.portNames.dist }} + port: {{ .Values.service.ports.dist }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.portNames.manager }} + port: {{ .Values.service.ports.manager }} + targetPort: stats + {{- end }} + {{- if .Values.service.extraPortsHeadless }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPortsHeadless "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + publishNotReadyAddresses: true diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/svc.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/svc.yaml new file mode 100644 index 000000000..366278e6b --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/svc.yaml @@ -0,0 +1,110 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . 
}} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerClass)) }} + loadBalancerClass: {{ .Values.service.loadBalancerClass }} + {{- end }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + allocateLoadBalancerNodePorts: {{ .Values.service.allocateLoadBalancerNodePorts }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or 
(.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portNames.amqp }} + port: {{ .Values.service.ports.amqp }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.amqp)) }} + nodePort: {{ .Values.service.nodePorts.amqp }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.portNames.amqpTls }} + port: {{ .Values.service.ports.amqpTls }} + targetPort: amqp-tls + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.amqpTls)) }} + nodePort: {{ .Values.service.nodePorts.amqpTls }} + {{- end }} + {{- end }} + {{- if .Values.service.epmdPortEnabled }} + - name: {{ .Values.service.portNames.epmd }} + port: {{ .Values.service.ports.epmd }} + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.epmd)) }} + nodePort: {{ .Values.service.nodePorts.epmd }} + {{- end }} + {{- end }} + {{- if .Values.service.distPortEnabled }} + - name: {{ .Values.service.portNames.dist }} + port: {{ .Values.service.ports.dist }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.dist)) }} + nodePort: {{ .Values.service.nodePorts.dist }} + {{- end }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.portNames.manager }} + port: {{ .Values.service.ports.manager }} + targetPort: stats + {{- if eq .Values.service.type 
"ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.manager)) }} + nodePort: {{ .Values.service.nodePorts.manager }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.portNames.metrics }} + port: {{ .Values.service.ports.metrics }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.metrics)) }} + nodePort: {{ .Values.service.nodePorts.metrics }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/tls-secrets.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/tls-secrets.yaml new file mode 100644 index 000000000..3261f1d35 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/tls-secrets.yaml @@ -0,0 +1,35 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "rabbitmq.createTlsSecret" . ) }} +{{- $secretName := printf "%s-certs" (include "common.names.fullname" .) 
}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + {{- if not .Values.auth.tls.autoGenerated }} + tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate | b64enc | quote }} + tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }} + ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }} + {{- else }} + {{- $ca := genCA "rabbitmq-internal-ca" 365 }} + {{- $fullname := include "common.names.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $serviceName := include "common.names.fullname" . 
}} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} + {{- end }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/validation.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/validation.yaml new file mode 100644 index 000000000..ecf3cab0e --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/templates/validation.yaml @@ -0,0 +1,7 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- include "rabbitmq.validateValues" . 
}} + diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/values.schema.json b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/values.schema.json new file mode 100644 index 000000000..8ef33eff4 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + } + } + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + }, + "replicaCount": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "persistence/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics 
details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Prometheus metrics for RabbitMQ", + "description": "Install Prometheus plugin in the RabbitMQ container", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/stable/jfrog-platform/local_dependancy_charts/rabbitmq/values.yaml b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/values.yaml new file mode 100644 index 000000000..ca3a283e1 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/rabbitmq/values.yaml @@ -0,0 +1,1527 @@ +# Copyright Broadcom, Inc. All Rights Reserved. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + defaultStorageClass: "" + storageClass: "" + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto +## @section RabbitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## @param image.registry [default: REGISTRY_NAME] RabbitMQ image registry +## @param image.repository [default: REPOSITORY_NAME/rabbitmq] RabbitMQ image repository +## @skip image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.digest RabbitMQ image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: docker.io + repository: bitnami/rabbitmq + tag: 3.13.6-debian-12-r1 + digest: "" + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] +## @section Common parameters +## + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param servicenameOverride String to partially override headless service name +## +servicenameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false +## @param enableServiceLinks Whether information about services should be injected into pod's environment variable +## The environment variables injected by service links are not used, but can lead to slow boot times or slow running of the scripts when there are many services in the current namespace. +## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`. 
+## +enableServiceLinks: true +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity +## @param automountServiceAccountToken Mount Service Account token in pod +## +automountServiceAccountToken: true +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param dnsPolicy DNS Policy for pod +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## E.g. +## dnsPolicy: ClusterFirst +## +dnsPolicy: "" +## @param dnsConfig DNS Configuration pod +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## E.g. +## dnsConfig: +## options: +## - name: ndots +## value: "4" +## +dnsConfig: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + username: user + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + password: "" + ## @param auth.securePassword Whether to set the RabbitMQ password securely. This is incompatible with loading external RabbitMQ definitions and 'true' when not setting the auth.password parameter. 
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + securePassword: true + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (existing secret must contain a value for `rabbitmq-password` key or override with setting auth.existingSecretPasswordKey) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + ## @param auth.existingSecretPasswordKey [default: rabbitmq-password] Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "" + ## @param auth.enableLoopbackUser If enabled, the user `auth.username` can only connect from localhost + ## + enableLoopbackUser: false + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + erlangCookie: "" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key or override with auth.existingSecretErlangKey) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + ## @param auth.existingSecretErlangKey [default: rabbitmq-erlang-cookie] Erlang cookie key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingErlangSecret` parameter is set + ## + existingSecretErlangKey: "" + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify Should [peer 
verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? + ## @param auth.tls.sslOptionsPassword.enabled Enable usage of password for private Key + ## @param auth.tls.sslOptionsPassword.existingSecret Name of existing Secret containing the sslOptionsPassword + ## @param auth.tls.sslOptionsPassword.key Enable Key referring to sslOptionsPassword in Secret specified in auth.tls.sslOptionsPassword.existingSecret + ## @param auth.tls.sslOptionsPassword.password Use this string as Password. If set, auth.tls.sslOptionsPassword.existingSecret and auth.tls.sslOptionsPassword.key are ignored + ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## @param auth.tls.overrideCaCertificate Existing secret with certificate content be mounted instead of the `ca.crt` coming from caCertificate or existingSecret/existingSecretFullChain. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + sslOptionsPassword: + enabled: false + existingSecret: "" + key: "" + password: "" + caCertificate: "" + serverCertificate: "" + serverKey: "" + existingSecret: "" + existingSecretFullChain: false + overrideCaCertificate: "" +## @param logs Path of the RabbitMQ server's Erlang log file. 
Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65535" +## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. +## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. 
+ ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256Mi + ## + value: 0.4 +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" + +## @param queue_master_locator Changes the queue_master_locator setting in the rabbitmq config file +## +queue_master_locator: min-masters + +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. +## +communityPlugins: "" +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap" +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: true + ## @param clustering.name RabbitMQ cluster name + ## If not set, a name is generated using the common.names.fullname template + ## + name: "" + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). 
+ ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + ## @param clustering.partitionHandling Switch Partition Handling Strategy. Either `autoheal` or `pause_minority` or `pause_if_all_down` or `ignore` + ## ref: https://www.rabbitmq.com/partitions.html#automatic-handling + ## + partitionHandling: autoheal +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.file Name of the definitions file + ## + file: "/app/load_definition.json" + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +## +args: [] +## @param lifecycleHooks Overwrite lifecycle for the RabbitMQ container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data.
+## +terminationGracePeriodSeconds: 120 +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" +## Container Ports +## @param containerPorts.amqp +## @param containerPorts.amqpTls +## @param containerPorts.dist +## @param containerPorts.manager +## @param containerPorts.epmd +## @param containerPorts.metrics +## +containerPorts: + amqp: 5672 + amqpTls: 5671 + dist: 25672 + manager: 15672 + epmd: 4369 + metrics: 9419 +## @param initScripts Dictionary of init scripts. Evaluated as a template. +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## For example: +## initScripts: +## my_init_script.sh: | +## #!/bin/sh +## echo "Do something." +## +initScripts: {} +## @param initScriptsCM ConfigMap with the init scripts. Evaluated as a template. +## Note: This will override initScripts +## +initScriptsCM: "" +## @param initScriptsSecret Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. +## +initScriptsSecret: "" +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing. 
+## To add more configuration, use `extraConfiguration` or `advancedConfiguration` instead +## + +## RabbitMQ tcp_listen_options parameters +## See : https://www.rabbitmq.com/networking.html for additional information +## +tcpListenOptions: + ## @param tcpListenOptions.backlog Maximum size of the unaccepted TCP connections queue + ## + backlog: 128 + ## @param tcpListenOptions.nodelay When set to true, deactivates Nagle's algorithm. Default is true. Highly recommended for most users. + ## + nodelay: true + ## tcpListenOptions.linger + ## + linger: + ## @param tcpListenOptions.linger.lingerOn Enable Server socket lingering + ## + lingerOn: true + ## @param tcpListenOptions.linger.timeout Server Socket lingering timeout + ## + timeout: 0 + ## @param tcpListenOptions.keepalive When set to true, enables TCP keepalives + ## + keepalive: false +configuration: |- + ## Username and password + default_user = {{ .Values.auth.username }} + {{- if and (not .Values.auth.securePassword) .Values.auth.password }} + default_pass = {{ .Values.auth.password }} + {{- end }} + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_name = {{ default (include "common.names.fullname" .) .Values.clustering.name }} + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default + cluster_formation.k8s.address_type = {{ .Values.clustering.addressType }} + {{- $svcName := printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.servicenameOverride) }} + cluster_formation.k8s.service_name = {{ $svcName }} + cluster_formation.k8s.hostname_suffix = .{{ $svcName }}.{{ include "common.names.namespace" .
}}.svc.{{ .Values.clusterDomain }} + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = {{ .Values.clustering.partitionHandling }} + {{- end }} + {{ if and .Values.clustering.enabled .Values.loadDefinition.enabled }} + cluster_formation.target_cluster_size_hint = {{ .Values.replicaCount }} + {{ end }} + {{- if .Values.loadDefinition.enabled }} + load_definitions = {{ .Values.loadDefinition.file }} + {{- end }} + # queue master locator + queue_master_locator = {{ .Values.queue_master_locator }} + # enable loopback user + {{- if not (empty .Values.auth.username) }} + loopback_users.{{ .Values.auth.username }} = {{ .Values.auth.enableLoopbackUser }} + {{- else}} + loopback_users.guest = {{ .Values.auth.enableLoopbackUser }} + {{- end }} + {{ template "rabbitmq.extraConfiguration" . }} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.ports.amqpTls }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- if .Values.auth.tls.sslOptionsPassword.enabled }} + ssl_options.password = {{ template "rabbitmq.tlsSslOptionsPassword" . 
}} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1.authn = ldap + auth_backends.1.authz = {{ ternary "ldap" "internal" .Values.ldap.authorisationEnabled }} + auth_backends.2 = internal + {{- $host := list }} + {{- $port := ternary 636 389 .Values.ldap.tls.enabled }} + {{- if .Values.ldap.uri }} + {{- $hostPort := get (urlParse .Values.ldap.uri) "host" }} + {{- $host = list (index (splitList ":" $hostPort) 0) -}} + {{- if (contains ":" $hostPort) }} + {{- $port = index (splitList ":" $hostPort) 1 -}} + {{- end }} + {{- end }} + {{- range $index, $server := concat $host .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ coalesce .Values.ldap.port $port }} + {{- if or .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} + auth_ldap.user_dn_pattern = {{ coalesce .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} + {{- end }} + {{- if .Values.ldap.basedn }} + auth_ldap.dn_lookup_base = {{ .Values.ldap.basedn }} + {{- end }} + {{- if .Values.ldap.uidField }} + auth_ldap.dn_lookup_attribute = {{ .Values.ldap.uidField }} + {{- end }} + {{- if .Values.ldap.binddn }} + auth_ldap.dn_lookup_bind.user_dn = {{ .Values.ldap.binddn }} + auth_ldap.dn_lookup_bind.password = {{ required "'ldap.bindpw' is required when 'ldap.binddn' is defined" .Values.ldap.bindpw }} + {{- end }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = {{ not .Values.ldap.tls.startTls }} + auth_ldap.use_starttls = {{ .Values.ldap.tls.startTls }} + {{- if .Values.ldap.tls.CAFilename }} + auth_ldap.ssl_options.cacertfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ .Values.ldap.tls.CAFilename }} + {{- end }} + {{- if .Values.ldap.tls.certFilename }} + auth_ldap.ssl_options.certfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ .Values.ldap.tls.certFilename }} + auth_ldap.ssl_options.keyfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ required "'ldap.tls.certKeyFilename' is required 
when 'ldap.tls.certFilename' is defined" .Values.ldap.tls.certKeyFilename }} + {{- end }} + {{- if .Values.ldap.tls.skipVerify }} + auth_ldap.ssl_options.verify = verify_none + auth_ldap.ssl_options.fail_if_no_peer_cert = false + {{- else if .Values.ldap.tls.verify }} + auth_ldap.ssl_options.verify = {{ .Values.ldap.tls.verify }} + {{- end }} + {{- end }} + {{- end }} + ## Prometheus metrics + ## + prometheus.tcp.port = {{ .Values.containerPorts.metrics }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" (dig "limits" "memory" "" .Values.resources) }} + {{- if (eq .Values.memoryHighWatermark.type "absolute") }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ include "rabbitmq.toBytes" .Values.memoryHighWatermark.value }} + {{- else if (eq .Values.memoryHighWatermark.type "relative") }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + {{- end }} + ## TCP Listen Options + ## + tcp_listen_options.backlog = {{ .Values.tcpListenOptions.backlog }} + tcp_listen_options.nodelay = {{ .Values.tcpListenOptions.nodelay }} + tcp_listen_options.linger.on = {{ .Values.tcpListenOptions.linger.lingerOn }} + tcp_listen_options.linger.timeout = {{ .Values.tcpListenOptions.linger.timeout }} + tcp_listen_options.keepalive = {{ .Values.tcpListenOptions.keepalive }} + {{- end }} +## @param configurationExistingSecret Existing secret with the configuration to use as rabbitmq.conf. 
+## Must contain the key "rabbitmq.conf" +## Takes precedence over `configuration`, so do not use both simultaneously +## With providing an existingSecret, extraConfiguration and extraConfigurationExistingSecret do not take any effect +## +configurationExistingSecret: "" +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## Do not use simultaneously with `extraConfigurationExistingSecret` +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB +## @param extraConfigurationExistingSecret Existing secret with the extra configuration to append to `configuration`. +## Must contain the key "extraConfiguration" +## Takes precedence over `extraConfiguration`, so do not use both simultaneously +## +extraConfigurationExistingSecret: "" +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## LDAP authorisation example: +## advancedConfiguration: |- +## [{rabbitmq_auth_backend_ldap,[ +## {tag_queries, [{administrator, {constant, true}}, +## {management, {constant, true}}]} +## ]}]. +## +## If both, advancedConfiguration and advancedConfigurationExistingSecret are set, then advancedConfiguration +## will be used instead of the secret. +# +advancedConfiguration: "" +## @param advancedConfigurationExistingSecret Existing secret with the advanced configuration file (must contain a key `advanced.config`). 
+## Use this as additional configuration in classic config format (Erlang term configuration format) as in advancedConfiguration +## Do not use in combination with advancedConfiguration, will be ignored +## +advancedConfigurationExistingSecret: "" +## This subsystem was introduced in RabbitMQ 3.8.0 to allow rolling upgrades of cluster members without shutting down the entire cluster. +## Feature flags are a mechanism that controls what features are considered to be enabled or available on all cluster nodes. If a feature flag is enabled, so is its associated feature (or behavior). If not then all nodes in the cluster will disable the feature (behavior). +## e.g, drop_unroutable_metric,empty_basic_get_metric,implicit_default_bindings,maintenance_mode_status,quorum_queue,virtual_host_metadata +## @param featureFlags that controls what features are considered to be enabled or available on all cluster nodes. +## +featureFlags: "" +## LDAP configuration +## +ldap: + ## @param ldap.enabled Enable LDAP support + ## + enabled: false + ## @param ldap.uri LDAP connection string. + ## + uri: "" + ## @param ldap.servers List of LDAP servers hostnames. This is valid only if ldap.uri is not set + ## + servers: [] + ## @param ldap.port LDAP servers port. This is valid only if ldap.uri is not set + ## + port: "" + ## DEPRECATED ldap.user_dn_pattern it will removed in a future, please use userDnPattern instead + ## Pattern used to translate the provided username into a value to be used for the LDAP bind + ## @param ldap.userDnPattern Pattern used to translate the provided username into a value to be used for the LDAP bind. + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + userDnPattern: "" + ## @param ldap.binddn DN of the account used to search in the LDAP server. + ## + binddn: "" + ## @param ldap.bindpw Password for binddn account. + ## + bindpw: "" + ## @param ldap.basedn Base DN path where binddn account will search for the users. 
+ ## + basedn: "" + ## @param ldap.uidField Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + ## @param ldap.uidField Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration + ## + uidField: "" + ## @param ldap.authorisationEnabled Enable LDAP authorisation. Please set 'advancedConfiguration' with tag, topic, resources and vhost mappings + ## ref: https://www.rabbitmq.com/ldap.html#authorisation + ## + authorisationEnabled: false + ## @param ldap.tls.enabled Enable TLS configuration. + ## @param ldap.tls.startTls Use STARTTLS instead of LDAPS. + ## @param ldap.tls.skipVerify Skip any SSL verification (hostnames or certificates) + ## @param ldap.tls.verify Verify connection. Valid values are 'verify_peer' or 'verify_none' + ## @param ldap.tls.certificatesMountPath Where LDAP certificates are mounted. + ## @param ldap.tls.certificatesSecret Secret with LDAP certificates. + ## @param ldap.tls.CAFilename CA certificate filename. Should match with the CA entry key in the ldap.tls.certificatesSecret. + ## @param ldap.tls.certFilename Client certificate filename to authenticate against the LDAP server. Should match with the certificate entry key in the ldap.tls.certificatesSecret. + ## @param ldap.tls.certKeyFilename Client Key filename to authenticate against the LDAP server. Should match with the certificate entry key in the ldap.tls.certificatesSecret.
+ ## + tls: + enabled: false + startTls: false + skipVerify: false + verify: "verify_peer" + certificatesMountPath: /opt/bitnami/rabbitmq/ldap/certs + certificatesSecret: "" + CAFilename: "" + certFilename: "" + certKeyFilename: "" +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes . +## Example: +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] +## @param extraSecrets Optionally specify extra secrets to be created by the chart. +## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## Example: +## extraSecrets: +## load-definition: +## load_definition.json: | +## { +## ... +## } +## +extraSecrets: {} +## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with prepended. +## +extraSecretsPrependReleaseName: false +## @section Statefulset parameters +## + +## @param replicaCount Number of RabbitMQ replicas to deploy +## +replicaCount: 1 +## @param schedulerName Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. 
+## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations RabbitMQ Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param updateStrategy.type Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate +## @param statefulsetLabels RabbitMQ statefulset labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} +## @param statefulsetAnnotations RabbitMQ statefulset annotations. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +statefulsetAnnotations: {} +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] +## RabbitMQ pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context +## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy +## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface +## @param podSecurityContext.supplementalGroups Set filesystem extra groups +## @param podSecurityContext.fsGroup Set RabbitMQ pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 +## @param containerSecurityContext.enabled Enabled RabbitMQ containers' Security Context +## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container +## @param containerSecurityContext.runAsUser Set RabbitMQ containers' Security Context runAsUser +## @param containerSecurityContext.runAsGroup Set RabbitMQ containers' Security Context runAsGroup +## @param containerSecurityContext.runAsNonRoot Set RabbitMQ container's Security Context runAsNonRoot +## @param containerSecurityContext.allowPrivilegeEscalation Set container's privilege escalation +## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem +## @param containerSecurityContext.capabilities.drop Set container's Security Context runAsNonRoot +## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## 
readOnlyRootFilesystem: true +## +containerSecurityContext: + enabled: true + seLinuxOptions: null + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" +## RabbitMQ containers' resource requests and limits +## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
+## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 +## +resourcesPreset: "micro" +## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) +## Example: +## resources: +## requests: +## cpu: 2 +## memory: 512Mi +## limits: +## cpu: 3 +## memory: 1024Mi +## +resources: {} +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 +## Configure RabbitMQ 
containers' extra options for startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param startupProbe.enabled Enable startupProbe +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} +## @param customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: true + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: "" + ## @param 
pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty. + ## + maxUnavailable: "" +## @section RBAC parameters +## + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Auto-mount the service account token in the pod + ## + automountServiceAccountToken: false + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. + ## + annotations: {} +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + create: true + ## @param rbac.rules Custom RBAC rules + ## Example: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## @section Persistence parameters +## +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessModes PVC Access Modes for RabbitMQ data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "" + ## @param persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom RabbitMQ images + ## + mountPath: /opt/bitnami/rabbitmq/.rabbitmq/mnesia + ## @param persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 8Gi + ## @param persistence.annotations Persistence annotations. Evaluated as a template + ## Example: + ## annotations: + ## example.io/disk-volume-type: SSD + ## + annotations: {} + ## @param persistence.labels Persistence labels. 
Evaluated as a template + ## Example: + ## labels: + ## app: my-app + labels: {} +## Persistent Volume Claim Retention Policy +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention +## +persistentVolumeClaimRetentionPolicy: + ## @param persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for rabbitmq Statefulset + ## + enabled: false + ## @param persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## + whenScaled: Retain + ## @param persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + whenDeleted: Retain +## @section Exposure parameters +## + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.portEnabled Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
+ ## + portEnabled: true + ## @param service.distPortEnabled Erlang distribution server port + ## + distPortEnabled: true + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + managerPortEnabled: true + ## @param service.epmdPortEnabled RabbitMQ EPMD Discovery service port + ## + epmdPortEnabled: true + ## Service ports + ## @param service.ports.amqp Amqp service port + ## @param service.ports.amqpTls Amqp TLS service port + ## @param service.ports.dist Erlang distribution service port + ## @param service.ports.manager RabbitMQ Manager service port + ## @param service.ports.metrics RabbitMQ Prometheues metrics service port + ## @param service.ports.epmd EPMD Discovery service port + ## + ports: + amqp: 5672 + amqpTls: 5671 + dist: 25672 + manager: 15672 + metrics: 9419 + epmd: 4369 + ## Service ports name + ## @param service.portNames.amqp Amqp service port name + ## @param service.portNames.amqpTls Amqp TLS service port name + ## @param service.portNames.dist Erlang distribution service port name + ## @param service.portNames.manager RabbitMQ Manager service port name + ## @param service.portNames.metrics RabbitMQ Prometheues metrics service port name + ## @param service.portNames.epmd EPMD Discovery service port name + ## + portNames: + amqp: "amqp" + amqpTls: "amqp-tls" + dist: "dist" + manager: "http-stats" + metrics: "metrics" + epmd: "epmd" + ## Node ports to expose + ## @param service.nodePorts.amqp Node port for Ampq + ## @param service.nodePorts.amqpTls Node port for Ampq TLS + ## @param service.nodePorts.dist Node port for Erlang distribution + ## @param service.nodePorts.manager Node port for RabbitMQ Manager + ## @param service.nodePorts.epmd Node port for EPMD Discovery + ## @param service.nodePorts.metrics Node port for RabbitMQ Prometheues metrics + ## + nodePorts: + amqp: "" + amqpTls: "" + dist: "" + manager: "" + epmd: "" + metrics: "" + 
## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: [] + ## @param service.extraPortsHeadless Extra ports to expose in the headless service + ## E.g.: + ## extraPortsHeadless: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPortsHeadless: [] + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.allocateLoadBalancerNodePorts Whether to allocate node ports when service type is LoadBalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + ## + allocateLoadBalancerNodePorts: true + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.loadBalancerClass Set the LoadBalancerClass + ## + loadBalancerClass: "" + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + ## @param service.clusterIP Kubernetes service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + ## @param service.annotations Service annotations. 
Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## DEPRECATED service.annotationsHeadless it will removed in a future release, please use service.headless.annotations instead + ## @param service.annotationsHeadless Headless Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + ## Headless service properties + ## + headless: + ## @param service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. + ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## e.g: + ## extraHosts: + ## - name: rabbitmq.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraRules The list of additional rules to be added to this ingress record. 
Evaluated as a template + ## Useful when looking for additional customization, such as using different backend + ## + extraRules: [] + ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. + ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - rabbitmq.local + ## secretName: rabbitmq.local-tls + ## + extraTls: [] + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: rabbitmq.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingress.existingSecret It is you own the certificate as secret. 
+ ## + existingSecret: "" +## Network Policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param networkPolicy.kubeAPIServerPorts [array] List of possible endpoints to kube-apiserver (limit to your cluster settings to increase security) + ## + kubeAPIServerPorts: [443, 6443, 8443] + ## @param networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + ## @param networkPolicy.addExternalClientAccess Allow access from pods with client label set to "true". Ignored if `networkPolicy.allowExternal` is true. + ## + addExternalClientAccess: true + ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressPodMatchLabels [object] Labels to match to allow traffic from other pods. 
Ignored if `networkPolicy.allowExternal` is true. + ## e.g: + ## ingressPodMatchLabels: + ## my-client: "true" + # + ingressPodMatchLabels: {} + ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. + ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces. Ignored if `networkPolicy.allowExternal` is true. + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} +## @section Metrics Parameters +## + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.ports.metrics }}" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping. + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricsRelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.path Define the path used by ServiceMonitor to scrap metrics + ## Could be /metrics for aggregated metrics or /metrics/per-object for more details + ## + path: "" + ## @param metrics.serviceMonitor.params Define the HTTP URL parameters used by ServiceMonitor + ## + params: {} + ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param metrics.serviceMonitor.annotations Extra annotations for the ServiceMonitor + ## + annotations: {} + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so 
prometheusRules will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "common.names.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "common.names.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "common.names.fullname" . }}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "common.names.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "common.names.fullname" . 
}}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "common.names.fullname" . }}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] +## @section Init Container Parameters +## + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 12-debian-12-r26 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + seLinuxOptions: null + runAsUser: 0 diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/.helmignore b/stable/jfrog-platform/local_dependancy_charts/xray/.helmignore new file mode 100644 index 000000000..c7eb1e274 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +OWNERS \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/CHANGELOG.md b/stable/jfrog-platform/local_dependancy_charts/xray/CHANGELOG.md new file mode 100644 index 000000000..8cfe6ec96 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/CHANGELOG.md @@ -0,0 +1,934 @@ +# JFrog Xray Chart Changelog +All changes to this chart will be documented in this file. 
+ +## [103.111.15] - Jan 15, 2025 +* Fix an issue with a warning in the rabbitmq password check +* Updated rabbitmq multi-arch tag version to `3.13.7-debian-12-r5` +* Updated bitnami kubectl multi-arch tag version to `1.32.0` + +## [103.109.0] - Nov 27, 2024 +* **Important changes** +* Upgrade rabbitmq chart version to 14.6.6 +* Added catalog as a dependency chart +* **Breaking changes** +* Upgrade postgres chart version to 15.5.20 + * This has many changes related to key names and paths in the values yaml + * The affected keys present in the default yaml have been aligned to the new path in 15.5.20 + * If you have customised any keys, make sure to validate them with the 15.5.20 chart + * Delete the postgresql statefulset and postgresql secret before the upgrade. For more information, please refer to the [xray upgrade docs](https://jfrog.com/help/r/jfrog-installation-setup-documentation/upgrading-xray) + +## [103.108.0] - Nov 11, 2024 +* Introduced a validation check in the template to warn users against using the default RabbitMQ password. If a default password is found, the installation will be paused, prompting users to update their credentials before proceeding.
+* Fix for panoramic env indentation [GH-1919](https://github.com/jfrog/charts/pull/1919) +* Added memory metric targetMemoryUtilizationPercentage to Xray Horizontal Pod Scaler + +## [103.107.0] - September 26, 2024 +* Added support to read rabbitmq and database secrets from mounted secret files + +## [103.105.0] - August 22, 2024 +* Added support for `serviceAccount.annotations` to be passed to chart [GH-1841](https://github.com/jfrog/charts/pull/1841) +* Updated rabbitmq multi-arch tag version to `3.13.6-debian-12-r1` + +## [103.102.0] - July 17, 2024 +* Added support for specifying resource constraints for RabbitMQ's pre-upgrade-hook job +* Fixed formatting error associated with the `volumeMounts` for the `panoramic` microservice [GH-1895](https://github.com/jfrog/charts/issues/1895) + +## [103.99.0] - June 18, 2024 +* Fixed issue where adding a colon in the image registry breaks deployment with a meta label error. [GH-1892](https://github.com/jfrog/charts/pull/1892) + +## [103.97.0] - May 27, 2024 +* Added chart label to xray pods + +## [103.96.0] - Apr 17, 2024 +* Added `rabbitmq.containerSecurityContext.allowPrivilegeEscalation` flag to ensure `RunAsUser` commands cannot bypass their existing sets of permissions. Set to `false` by default +* Updated rabbitmq multi-arch tag version to `3.12.13-debian-11-r0` + +## [103.95.0] - Apr 2, 2024 +* **IMPORTANT** +* Refactored systemYaml configuration (moved to files/system.yaml instead of key in values.yaml) +* Added ability to provide `extraSystemYaml` configuration in values.yaml which will merge with the existing system yaml when `systemYamlOverride` is not given.
[GH-1842](https://github.com/jfrog/charts/pull/1842) +* Update postgresql tag version to `15.6.0-debian-11-r16` + +## [103.94.0] - Mar 27, 2024 +* **IMPORTANT** +* Added image section for `initContainers` instead of `initContainerImage` +* Removed image section for `loggers` +* Added support for `global.versions.initContainers` to override `initContainers.image.tag` + +## [103.93.0] - Mar 5, 2024 +* Updated rabbitmq multi-arch tag version to `3.12.10-debian-11-r1` +* Fixed - StatefulSet pod annotations changed from range to toYaml [GH-1828](https://github.com/jfrog/charts/issues/1828) +* Updated README.md to create a namespace using `--create-namespace` as part of helm install +* Added a headless service for IPA pod + +## [103.91.0] - Feb 21, 2024 +* **IMPORTANT** +* Added `unifiedSecretInstallation` flag which enables single unified secret holding all internal (chart) secrets to `true` by default +* Renamed sizing yaml file names from `xray-sizing-.yaml` to `xray-.yaml` +* **Important change:** +* Update postgresql tag version to `15.2.0-debian-11-r23` +* Renamed `common.xrayUserId` to `podSecurityContext.runAsUser` +* Renamed `common.xrayGroupId` to `podSecurityContext.runAsGroup` and `podSecurityContext.fsGroup` +* Renamed `common.fsGroupChangePolicy` to `podSecurityContext.fsGroupChangePolicy` + +## [103.89.0] - Jan 18, 2024 +* Remove fallback section from keda.
+ +## [103.88.0] - Dec 20,2023 +* Added support for migrating rabbitmq to high-availability quorum queues setup + +## [103.87.0] - Dec 7,2023 +* Update minimum supported kubernetes version to 1.19 +* Added recommended t-shirt sizing configurations under sizing folder +* Added support for rabbitmq high-availability quorum queues clean install setup +* Fix the pre-upgrade-hook for rabbitmq migration to support installations on openshift platforms + +## [103.86.0] - Nov 14,2023 +* Fixed - containerSecurityContext on loggers + +## [103.83.0] - Sep 15,2023 +* Fixed - Support to configure privateRegistry for pre-upgrade-hook + +## [103.80.0] - Jul 16, 2023 +* Added `podSecurityContext.enabled` and `containerSecurityContext.enabled` to support openshift + +## [103.79.0] - Jul 3, 2023 +* Added TLS support for rabbitmq +* Moved common.extraEnvs to statefulset from values.yaml +* Added `extraEnvVars` for each container +* Fixed rabbitmq feature flag conditions for pre-upgrade hook command execution + +## [103.77.0] - Jun 05, 2023 +* Set securePassword to false in a values yaml [GH-1763](https://github.com/jfrog/charts/pull/1763) +* Upgraded to autoscaling/v2 + +## [103.76.0] - May 24, 2023 +* Set Pod disruption budget to none by default if not set in `xray.minAvailable` + +## [103.75.0] - May 19, 2023 +* Fix lint issue when rabbitmq is disabled + +## [103.74.0] - May 2, 2023 +* Inject global image pull secret to the system.yaml for JAS +* Add configuration for JAS images registry and repository in the system.yaml +* Added securityContext for rabbitmq pre-upgrade hook pod + +## [103.72.0] - Apr 07, 2023 +* Adding vm memory memoryHighWatermark field and scheduler field for Rabbitmq Chart +* Update pre-upgrade hook image of rabbitmq + +## [103.70.0] - Mar 21, 2023 +* Updated postgresql multi-arch tag version to `13.10.0-debian-11-r14` +* Updated rabbitmq multi-arch tag version to `3.11.10-debian-11-r5` +* Added pre-upgrade hook for rabbitmq upgrade from 3.8.x to 3.11.x + +## 
[103.69.0] - Mar 21, 2023 +* **IMPORTANT** +* Added default rabbitmq user creation via `load_definition` secret to support upstream bitnami [11.x chart](https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq#to-1100) +* Updated rabbitmq's chart version to `11.9.3` and image version to `3.11.9-debian-11-r1` +* Updated initContainerImage and logger image to `ubi9/ubi-minimal:9.1.0.1793` +* Added `rabbitmq.featureFlags` to support upgrade from 3.8.x to 3.11.x . More info [here](https://blog.rabbitmq.com/posts/2022/07/required-feature-flags-in-rabbitmq-3.11/) + +## [103.68.0] - Feb 16, 2023 +* Updated initContainerImage and logger image to `ubi9/ubi-minimal:9.1.0.1760` + +## [103.67.0] - Feb 16, 2023 +* Added topologySpreadConstraints to xray pods +* Allow setting executionServiceAesKey in a secret [GH-1685](https://github.com/jfrog/charts/issues/1685) +* Updated postgresql tag version to `13.9.0-debian-11-r11` +* Adding support for PodDisruptionBudget +* Updated jfrogUrl text path to copy +* Added pods/log and events resources to xray serviceaccount +* Added option to pass extra environment variables via `common.extraEnvVars` for xray containers + +## [103.63.0] - Jan 16, 2023 +* Updated initContainerImage and logger image to `ubi8/ubi-minimal:8.7.1049` + +## [103.60.0] - Oct 27, 2022 +* Updated router version to `7.51.0` + +## [103.59.0] - Sep 20, 2022 +* Removed `newProbes.enabled`, default to new probes +* Updated Observability version to `1.11.0` +* Updated initContainerImage to `ubi8/ubi-minimal:8.6-941` +* Added support for annotations for xray statefulset [GH-1665](https://github.com/jfrog/charts/pull/1665) + +## [103.57.0] - Aug 25, 2022 +* Added support for lifecycle hooks for all containers + +## [103.56.0] - Aug 25, 2022 +* Updated initContainerImage and logger Image to `ubi8/ubi-minimal:8.6-854` +* Added `.Values.xray.openMetrics.enabled` flag to enable metrics (defaults to `false`) [GH-1649](https://github.com/jfrog/charts/pull/1649) +* Added flag 
`xray.schedulerName` to set for the pods the value of schedulerName field [GH-1606](https://github.com/jfrog/charts/issues/1606) +* Updated Observability version to `1.9.3` + +## [103.55.0] - Aug 25, 2022 +* Updated initContainerImage to ubi8/ubi-minimal:8.6-854 +* Updated rabbitmq version to `3.9.21-debian-11-r0` +* Updated router version to `7.45.0` +* Added support to truncate (> 63 chars) for unifiedCustomSecretVolumeName + +## [103.54.0] - July 14, 2022 +* Added new flag "unifiedSecretInstallation" to enables single unified secret holding all the secrets +* Added `executionserviceAWS` key (optional) in values.yaml like joinkey and masterkey [GH-1600](https://github.com/jfrog/charts/pull/1600) +* Updated Observability version to `1.9.2` +* Updated router version to `7.42.0` + +## [103.53.0] - Jun 22, 2022 +* Use an alternate command for `find` to copy custom certificates + +## [103.52.0] - Jun 20, 2022 +* Updated Observability version to `1.9.0` + +## [103.51.0] - May 17, 2022 +* Updated Observability version to `1.7.0` + +## [103.49.0] - Apr 29, 2022 +* Updated Observability version to `1.6.1` +* Updated router version to `7.38.0` +* Update rabbitmq chart and image to 8.31.6 and 3.9.15-debian-10-r5 + +## [103.48.0] - Apr 19, 2022 +* Updated Observability version to `1.5.1` +* Reduce startupProbe `initialDelaySeconds` +* Align all liveness and readiness probes failureThreshold to `5` seconds + +## [103.47.0] - Apr 14, 2022 +* Added support for custom global probes timeout +* Added env variable `XRAY_K8S_ENV` to xray server container + +## [103.46.0] - Mar 23, 2022 +* Updated router version to `7.36.1` +* Updated Observability version to `1.5.0` + +## [103.45.0] - Mar 11, 2022 +* Updated router version to `7.35.0` +* Changed dependency charts repo to `charts.jfrog.io` +* Added support for `global.nodeSelector` applies to xray pods + +## [103.44.0] - Feb 15, 2022 +* Updated router version to `7.32.1` +* Updated Observability version to `1.3.0` +* Added support 
loggers sidecars to tail a configured log +* Added silent option for curl probes + +## [103.42.0] - Feb 12, 2022 +* Corrected the NetworkPolicy podSelector for RabbitMQ and Postgres +* Option to skip wait-for-db init container with '--set waitForDatabase=false' +* Added support for PriorityClass +* Updated Observability version to `1.2.3` + +## [103.41.0] - Feb 12, 2022 +* Add more user friendly support for pod affinity and anti-affinity +* Pod anti-affinity is now enabled by default (soft rule) +* Added `ResourceQuota` and permissions for xray execution service +* Added support for custom pod annotations using `xray.annotations` +* Added support for setting `fsGroupChangePolicy` +* Add job permissions to use by execution service +* Updated Observability version to `1.2.2` +* Updated router version to `7.30.0` +* Sets the AES key used by execution server to the xray server and analysis containers +* Fix regression in affinity path and revert it to its previous path + +## [103.40.0] - Dec 23, 2021 +* Refactored `database-creds` secret to create only when database values are passed +* Refactored probes to replace httpGet probes with basic exec + curl +* Added new endpoints for probes `/api/v1/system/liveness` and `/api/v1/system/readiness` +* Enabled `newProbes:true` by default to use these endpoints +* Updated Observability version to `1.2.0` +* Fix filebeat sidecar spool file permissions +* Added `extraSecretsPrependReleaseName` to load-definitions secret in rabbitmq subchart +* Updated filebeat sidecar container to `7.16.2` + +## [103.39.0] - Dec 17, 2021 +* Added `server.mailServer` and `server.indexAllBuilds` as optional fields +* Added support for HorizontalPodAutoscaler apiVersion `autoscaling/v2beta2` +* Update postgresql tag version to `13.4.0-debian-10-r39` +* Refactored `router.requiredServiceTypes` to support platform chart + +## [103.37.0] - Nov 26, 2021 +* Fixed incorrect permission for filebeat.yaml [GH-1521](https://github.com/jfrog/charts/issues/1521)
+* Moved router.topology.local.requiredservicetypes from system.yaml to router as environment variable +* Updated initContainerImage to `jfrog/ubi-minimal:8.5-204` +* Updated Observability version to `1.1.4` +* Updated router version to `7.28.2` + +## [103.36.0] - Nov 11, 2021 +* Added Observability service + +## [103.35.0] - Oct 14, 2021 +* Added default values cpu and memory in initContainers +* Updated router version to `7.26.0` +* Updated (`rbac.create` and `serviceAccount.create` to false by default) for least privileges +* Fixed incorrect data type for `Values.router.serviceRegistry.insecure` in default values.yaml [GH-1514](https://github.com/jfrog/charts/pull/1514/files) +* **IMPORTANT** +* Changed init-container images from `alpine` to `ubi8/ubi-minimal` +* Fixed incorrect data type for `Values.router.serviceRegistry.insecure` in default values.yaml [GH-1514](https://github.com/jfrog/charts/pull/1514/files) + +## [103.34.0] - Sep 20, 2021 +* Added min kubeVersion ">= 1.14.0-0" in chart.yaml +* Update alpine tag version to `3.14.2` + +## [103.32.3] - Sep 08, 2021 +* Dropped NET_RAW capability for the containers +* Added support for new probes (set to false by default) +* Updated router version to `7.25.1` + +## [103.30.0] - Aug 13, 2021 +* Update router version to `7.24.1` +* Support global and product specific tags at the same time +* Updated readme of chart to point to wiki.
Refer [Installing Xray](https://www.jfrog.com/confluence/display/JFROG/Installing+Xray) +* Added security hardening fixes +* Enabled startup probes for k8s >= 1.20.x +* Changed network policy to allow all ingress and egress traffic +* Added support for serviceRegistry insecure flag in router + +## [103.29.0] - July 19, 2021 +* Added support for graceful shutdown of router container on SIGTERM +* Update router version to `7.21.5` + +## [103.28.1] - July 13, 2021 +* Add support for custom secrets + +## [103.28.0] - July 6, 2021 +* Update router version to `7.21.3` +* Update alpine tag version to `3.14.0` +* Add required services for router container in systemYaml + +## [103.27.0] - June 15, 2021 +* Added configurable `.Values.global.versions.router` in values.yaml + +## [103.26.0] - June 3, 2021 +* Added rabbitmq.nameOverride support for rabbitmq password and url + +## [103.25.2] - May 26, 2021 +* Update router version to `7.19.4` + +## [103.25.1] - May 21, 2021 +* Bumping chart version to align with app version +* Fix broken support for startupProbe for k8s < 1.18.x +* Update router version to `7.18.2` +* Added support for `nameOverride` and `fullnameOverride` in values.yaml +* Fix STS name in hpa + +## [8.0.0] - April 22, 2021 +* **Breaking change:** +* Increased default postgresql persistence size to `300Gi` +* Update postgresql tag version to `13.2.0-debian-10-r55` +* Update postgresql chart version to `10.3.18` in chart.yaml - [10.x Upgrade Notes](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#to-1000) +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! 
+* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass previous 9.x/10.x/12.x's postgresql.image.tag, previous postgresql.persistence.size and databaseUpgradeReady=true +* **IMPORTANT** +* This chart is only helm v3 compatible +* Update Xray to version `3.23.0` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.23) +* Update rabbitmq tag version to `3.8.14-debian-10-r32` +* Update router version to `7.17.5` +* Update alpine tag version to `3.13.5` + +## [7.7.0] - April 6, 2021 +* Update Xray to version `3.22.1` + +## [7.6.1] - April 6, 2021 +* Update alpine tag version to `3.13.4` + +## [7.6.0] - Apr 5, 2021 +* **IMPORTANT** +* Added `charts.jfrog.io` as default JFrog Helm repository + +## [7.5.1] - Mar 31, 2021 +* Update Xray to version `3.21.2` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.21.2) + +## [7.5.0] - Mar 30, 2021 +* Update Xray to version `3.21.0` +* Update router version to `7.17.2` +* Add `timeoutSeconds` to all exec probes - Please refer [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) + +## [7.4.0] - Mar 24, 2021 +* Update Xray to version `3.20.1` +* Update router version to `7.17.1` +* Optimized startupProbe time +* Add support for graceful shutdown + +## [7.3.0] - Mar 18, 2021 +* Add support to startupProbe + +## [7.2.0] - Mar 12, 2021 +* Update Xray to version `3.19.1` + +## [7.1.1] - Mar 9, 2021 +* Update Xray to version `3.18.1` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.18.1) +* Removed bintray URL references in the chart +* Update router version to `7.15.3` + +## [7.1.0] - Mar 03, 2021 +* Update Xray to version `3.18.0` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.18) + 
+## [7.0.2] - Feb 18, 2021 +* Update router version to `7.15.2` + +## [7.0.1] - Feb 18, 2021 +* Update Xray to version `3.17.4` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.17.4) + +## [7.0.0] - Feb 10, 2021 +* **Breaking changes:** +* Deprecation of rabbitmq-ha chart - [Notes](https://github.com/helm/charts/tree/master/stable/rabbitmq-ha#%EF%B8%8F-deprecated---rabbitmq-high-available) +* Added RABBITMQ_MIGRATION_NOTES.md - Steps for migration of data from rabbitmq-ha to rabbitmq bitnami +* **Important:** Migration to bitnami rabbitmq should be done before upgrading to 7.x chart versions + +## [6.11.0] - Feb 08, 2021 +* Support for custom certificates using secrets +* **Important:** Switched docker images download from `docker.bintray.io` to `releases-docker.jfrog.io` +* Update alpine tag version to `3.13.1` +* Update Xray to version `3.17.2` +* Update router version to `7.12.6` + +## [6.10.0] - Jan 25, 2021 +* Update Xray to version `3.16.0` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.16) +* Added support for passing additionalSpec to xray service resource +* Removed unused variables in values.yaml + +## [6.9.3] - Jan 25, 2021 +* Add support for hostAliases + +## [6.9.2] - Jan 13, 2021 +* Update Xray to version `3.15.3` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.15.3) + +## [6.9.1] - Jan 5, 2021 +* Add support for creating additional kubernetes resources - [refer here](https://github.com/jfrog/log-analytics-prometheus/blob/master/helm/xray-values.yaml) +* Update router version to `7.12.4` + +## [6.9.0] - Dec 31, 2020 +* Update Xray to version `3.15.1` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.15.1) + +## [6.8.3] - Dec 29, 2020 +* Update Xray to version `3.14.3` - [Release 
Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.14.3) + +## [6.8.2] - Dec 28, 2020 +* Updated Xray application sizing yamls (values-small.yaml, values-medium.yaml, values-large.yaml) + +## [6.8.1] - Dec 24, 2020 +* Update Xray to version `3.14.1` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.14.1) + +## [6.8.0] - Dec 18, 2020 +* Update Xray to version `3.14.0` + +## [6.7.2] - Dec 14, 2020 +* Added support for passing actualUsername in secrets + +## [6.7.1] - Dec 11, 2020 +* Added configurable `.Values.global.versions.xray` in values.yaml + +## [6.7.0] - Dec 10, 2020 +* Update postgresql tag version to `12.5.0-debian-10-r25` +* Update rabbitmq tag version to `3.8.9-debian-10-r58` +* Update rabbitmq-ha tag version to `3.8.9-alpine` + +## [6.6.0] - Dec 8, 2020 +* Update Xray to version `3.13.0` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.13) +* Updated chart maintainers email + +## [6.5.2] - Dec 4, 2020 +* **Important:** Renamed `.Values.systemYaml` to `.Values.systemYamlOverride` + +## [6.5.1] - Nov 30, 2020 +* Updated port namings on services and pods to allow for istio protocol discovery + +## [6.5.0] - Nov 30, 2020 +* Update Xray to version `3.12.0` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.12) +* Update router version to `7.11.2` +* Update alpine tag version to `3.12.1` + +## [6.4.4] - Nov 20, 2020 +* Support external rabbitmq credentials to come from existing secret + +## [6.4.3] - Nov 16, 2020 +* Support actualUsername for Azure +* Bugfix - Issue with custom image tags + +## [6.4.2] - Nov 16, 2020 +* Update Xray to version 3.11.2 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.11) + +## [6.4.1] - Nov 10, 2020 +* Pass system.yaml via external secret for advanced usecases 
+* Bugfix - stateful set not picking up changes to database secrets + +## [6.4.0] - Nov 9, 2020 +* Update Xray to version 3.11.1 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.11) +* Fix values-small,medium,large yaml files + +## [6.3.0] - Nov 3, 2020 +* Change stable repository location to https://charts.helm.sh/stable +* Update bitnami rabbitmq chart to 7.7.1 + +## [6.2.1] - Oct 23, 2020 +* Update router version to `1.4.4` + +## [6.2.0] - Oct 23, 2020 +* Update Xray to version 3.10.3 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.10.3) + +## [6.1.2] - Oct 9, 2020 +* Add global support for customInitContainersBegin + +## [6.1.1] - Oct 5, 2020 +* Fixed broken joinkey condition +* Updated UPGRADE_NOTES.md + +## [6.1.0] - Oct 1, 2020 +* Update Xray to version 3.9.1 + +## [6.0.6] - Sep 30, 2020 +* Added support for resources in init containers + +## [6.0.5] - Sep 28, 2020 +* Update Xray to version 3.8.8 - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.8.8) +* Added support for labels for STS and pods + +## [6.0.4] - Sep 25, 2020 +* Update Xray to version `3.8.7` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.8.7) +* Update filebeat version to `7.9.2` + +## [6.0.3] - Sep 22, 2020 +* Readme Updates + +## [6.0.2] - Sep 17, 2020 +* Update Xray to version `3.8.6` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.8.6) + +## [6.0.1] - Sep 16, 2020 +* Update Xray to version `3.8.5` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.8.5) +* Added additional rabbitmq configuration +* Added back `common.xrayVersion` in values.yaml + +## [6.0.0] - Sep 2, 2020 +* **Breaking change:** Changed `imagePullSecrets` value from string to list. 
+* **Breaking change:** Added `image.registry` and `common.xrayVersion` is changed to `image.tag` under analysis,indexer,persist,server and router sections +* Added support for global values +* Update postgresql chart version to `9.3.4` +* Updated chart maintainers in chart.yaml +* Update router version to `1.4.3` + +## [5.0.0] - Aug 24, 2020 +* Update Xray to version `3.8.2` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.8.2) +* Update postgresql chart version to `9.3.2` - [9.x Upgrade Notes](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#900) +* **IMPORTANT** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! +* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass previous 9.x/10.x's postgresql.image.tag and databaseUpgradeReady=true +* Update postgresql tag version to `12.3.0-debian-10-r71` +* Update rabbitmq tag version to `3.8.7-debian-10-r3` +* Update rabbitmq-ha tag version to `3.8.7-alpine` + +## [4.2.1] - Aug 14, 2020 +* Added support for external rabbitmq +* Added support for Load Definitions for rabbitmq subchart when `rabbitmq.enabled=true` . Please refer [here](https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq#load-definitions) + +## [4.2.0] - Aug 13, 2020 +* Update Xray to version `3.8.0` - [Release Notes](https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.8) +* Update rabbitmq-ha tag version to `3.8.6-alpine` +* Update rabbitmq tag version to `3.8.6-debian-10-r1` + +## [4.1.3] - Jul 28, 2020 +* Add tpl to external database secrets. 
+* Modified `scheme` to `xray.scheme` + +## [4.1.2] - Jul 16, 2020 +* Added support for `common.customSidecarContainers` to create custom sidecar containers +* Added support for `common.configMaps` to create custom configMaps +* Added README for Establishing TLS and Adding certificates. Please refer [here](https://github.com/jfrog/charts/blob/master/stable/xray/README.md#establishing-tls-and-adding-certificates) +* Update router version to `1.4.2` + +## [4.1.1] - Jul 10, 2020 +* Move some postgresql values to where they should be according to the subchart. + +## [4.1.0] - Jul 9, 2020 +* Update Xray to version `3.6.2` - https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.6.2 +* Update rabbitmq-ha tag version to 3.8.5-alpine +* **IMPORTANT** +* Added ChartCenter Helm repository in README + +## [4.0.1] - Jul 3, 2020 +* Added compatability to support latest 7.x rabbitmq subchart when `rabbitmq.enabled=true` +* Update RabbitMQ chart to v7.4.3 +* **IMPORTANT** +* RabbitMQ 7.x chart is [not compatible](https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq#to-700) with previous rabbitmq 6.x chart in Xray 3.x chart +* Please refer [here](https://github.com/jfrog/charts/blob/master/stable/xray/README.md#special-upgrade-notes) for upgrade notes + +## [4.0.0] - Jun 26, 2020 +* Update postgresql tag version to `10.13.0-debian-10-r38` +* Update alpine tag version to `3.12` +* Update rabbitmq tag version to 3.8.5-debian-10-r14 +* Update RabbitMQ chart to v7.3.3 +* Update RabbitMQ-HA chart to v1.46.4 +* **IMPORTANT** +* If this is a new deployment or you already use an external database (`postgresql.enabled=false`), these changes **do not affect you**! 
+* If this is an upgrade and you are using the default PostgreSQL (`postgresql.enabled=true`), you need to pass postgresql.image.tag=9.6.18-debian-10-r7 and databaseUpgradeReady=true + +## [3.5.1] - Jun 25, 2020 +* Added prestartcommand to router container to match same mechanism in all other xray containers + +## [3.5.0] - Jun 22, 2020 +* Update Xray to version `3.5.2` - https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.5.2 +* Update alpine to version `3.12` + +## [3.4.2] - Jun 13, 2020 +* Adding tpl to customVolumeMounts +* Fix `replicaCount` in README.md + +## [3.4.1] - Jun 12, 2020 +* Fix broken customVolumeMounts + +## [3.4.0] - Jun 1, 2020 +* Update Xray to version `3.4.0` - https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.4 +* Added Upgrade Notes in README for 3.x upgrades - https://github.com/jfrog/charts/blob/master/stable/mission-control/README.md#special-upgrade-notes +* Update router version to `1.4.0` +* Update postgresql tag version to `9.6.18-debian-10-r7` +* Added tpl to support external database secrets values +* Added custom volumes/volumesMounts under `common` +* Removed custom volumes from each specific service +* Fixes Broken upgrades of charts - use `kubectl delete statefulsets ` and run helm upgrade + +## [3.3.2] - May 20, 2020 +* Skip warning in NOTES if `xray.masterKeySecretName` is set + +## [3.3.1] - May 01, 2020 +* Adding tpl to values to support jfrogUrl + +## [3.3.0] - Apr 28, 2020 +* Update Xray to version `3.3.0` - https://www.jfrog.com/confluence/display/JFROG/Xray+Release+Notes#XrayReleaseNotes-Xray3.3 + +## [3.2.4] - Apr 20, 2020 +* Adding tpl to xray-statefulset for JF_SHARED_PASSWORD + +## [3.2.3] - Apr 15, 2020 +* Support existingsecrets for rabbitmq/rabbitmq-ha passwords +* Bump router version to `1.3.0` +* Bump postgresql tag version to `9.6.17-debian-10-r72` in values.yaml + +## [3.2.2] - Apr 15, 2020 +* Fix broken rabbitmq support when 
`rabbitmq.enabled=true` + +## [3.2.1] - Apr 14, 2020 +* customInitContainer identation template fix + +## [3.2.0] - Apr 13, 2020 +* Bump RabbitMQ chart to v6.25.2 +* Bump RabbitMQ-HA chart to v1.44.2 + +## [3.1.1] - April 13, 2020 +* Updated helm v3 commands + +## [3.1.0] - April 10, 2020 +* Use dependency charts from `https://charts.bitnami.com/bitnami` +* Bump postgresql chart version to `8.7.3` in requirements.yaml + +## [3.0.28] - April 8, 2020 +* Support database credentials as secrets + +## [3.0.27] - April 2, 2020 +* Support masterKey and joinKey as secrets + +## [3.0.26] - Mar 31, 2020 +* Update Xray to version `3.2.3` +* Bump router to version `1.2.1` + +## [3.0.25] - Mar 31, 2020 +* README fixes + +## [3.0.24] - Mar 27, 2020 +* Add support for masterKey as secret + +## [3.0.23] - Mar 23, 2020 +* Use `postgresqlExtendedConf` for setting custom PostgreSQL configuration (instead of `postgresqlConfiguration`) + +## [3.0.22] - Mar 17, 2020 +* Changed all single quotes to double quotes in values files + +## [3.0.21] - Mar 12, 2020 +* Fix for xray pvc + +## [3.0.20] - Mar 11, 2020 +* Unified charts public release + +## [3.0.19] - Mar 9, 2020 +* Cleanup `ingress` code + fixes + +## [3.0.18] - Mar 9, 2020 +* Add default `joinKey` value + +## [3.0.17] - Mar 6, 2020 +* Cleanup of not needed values +* Bump PostgreSQL chart to v8.4.1 +* Bump RabbitMQ chart to v6.18.1 +* Bump RabbitMQ-HA chart to v1.41.0 + +## [3.0.16] - Mar 4, 2020 +* Add support for disabling `consoleLog` in `system.yaml` file + +## [3.0.15] - Feb 28, 2020 +* Fix reference of incorrect key to set external database url from documentation + +## [3.0.14] - Feb 27, 2020 +* Add an annotation with the checksum of the `system.yaml` file to make sure the pods restart after a configuration change + +## [3.0.13] - Feb 26, 2020 +* Update Xray to version `3.2.0` + +## [3.0.12] - Feb 24, 2020 +* Update Xray to version `3.0.1` + +## [1.3.8] - Feb 18, 2020 +* Update Xray version to 2.11.4 + +## [1.3.7] - Feb 13, 
2020 +* Fix Xray README `ingerss.additionalRules` description + +## [1.3.6] - Feb 11, 2020 +* Add support for `preStartCommand` + +## [1.3.5] - Feb 2, 2020 +* Add a comment stating that it is recommended to use external databases with a static password for production installations + +## [1.3.4] - Jan 30, 2020 +* Add the option to configure resources for the logger containers + +## [1.3.3] - Dec 31, 2019 +* Update Xray version to 2.11.3 + +## [1.3.2] - Dec 23, 2019 +* Mark empty map values with `{}` + +## [1.3.1] - Dec 1, 2019 +* Added custom volume mounts to the server stateful set +* Added custom annotations to the server, indexer, analysis, and persist stateful sets + +## [1.3.0] - Dec 3, 2019 +* Update Xray version to 2.11.0 + +## [1.2.9] - Nov 24, 2019 +* Fix the Xray probes path + +## [1.2.8] - Nov 21, 2019 +* Make the Xray probes customisable + +## [1.2.7] - Nov 21, 2019 +* Prevent probes failing on 403 (Forbidden) - fixes + +## [1.2.6] - Nov 20, 2019 +* Prevent probes failing on 403 (Forbidden) + +## [1.2.5] - Nov 20, 2019 +* Update Xray logo + +## [1.2.4] - Nov 7, 2019 +* Update Xray version to 2.10.7 + +## [1.2.3] - Oct 28, 2019 +* Update Xray version to 2.10.5 + +## [1.2.2] - Oct 26, 2019 +* Update Xray version to 2.10.4 + +## [1.2.1] - Oct 7, 2019 +* Update Xray version to 2.10.1 + +## [1.2.0] - Oct 3, 2019 +* Update Xray version to 2.10.0 + +## [1.1.1] - Sep 26, 2019 +* Add support for running custom init containers before the predefined init containers using `common.customInitContainersBegin` + +## [1.1.0] - Sep 3, 2019 +* Update Xray version to 2.9.0 + +## [1.0.5] - Aug 13, 2019 +* Add the option to provide a precreated secret for XRAY_MASTER_KEY + +## [1.0.4] - Aug 11, 2019 +* Add information about Xray ingress additionalRules + +## [1.0.3] - Jul 22, 2019 +* Change Ingress API to be compatible with recent kubernetes versions + +## [1.0.2] - Jul 15, 2019 +* Add the option to provide ingress additional rules + +## [1.0.1] - Jul 15, 2019 +* Updated 
README.md to the new defaults. + +## [1.0.0] - Jul 9, 2019 +* Set default server and indexer services persistence to `true`. +* **IMPORTANT:** + * To upgrade from a previous Xray deployment, you have to pass the `--force` flag to the `helm upgrade` command. + * This is mandatory to force the change services persistence to `true`. + * This change will recreate the server and indexer pods! + * **NOTE:** Don't forget to pass the DBs passwords to the `helm upgrade` if these were auto generated. See [README.md](README.md) for details in the **Upgrade** section. + +## [0.12.17] - Jul 1, 2019 +* Update Xray version to 2.8.9 + +## [0.12.16] - June 25, 2019 +* Update Xray version to 2.8.8 + +## [0.12.15] - June 24, 2019 +* Update chart maintainers + +## [0.12.14] - June 23, 2019 +* Add values files for small, medium and large installations + +## [0.12.13] - June 20, 2019 +* Document the mongoDB resources values suggestion +* Fix xray-server service annotation + +## [0.12.12] - June 17, 2019 +* Optional support for PostgreSQL with TLS + +## [0.12.11] - June 7, 2019 +* Update Xray version to 2.8.7 +* Add persistence to Server and Indexer + +## [0.12.10] - May 28, 2019 +* Update Xray version to 2.8.6 + +## [0.12.9] - May 24, 2019 +* Update stateful set api and add serviceName spec + +## [0.12.8] - May 20, 2019 +* Fix missing logger image tag + +## [0.12.7] - Apr 16, 2019 +* Updated Xray version to 2.8.3 + +## [0.12.6] - Apr 15, 2019 +* Updated Xray version to 2.8.2 + +## [0.12.5] - May 12, 2019 +* Updated rabbitmq-ha chart version to 1.26.0 + +## [0.12.4] - Apr 15, 2019 +* Simplify handling connection strings setup in `xray_config.yaml` to better support ampersand in external connection strings +* **IMPORTANT:** If using an external connection string for PostgreSQL or MongoDB, **do not escape** the ampersand with `\` + +## [0.12.3] - Apr 15, 2019 +* Move `skipEntLicCheckForCloud: true` config to be part of default Xray config + +## [0.12.2] - Apr 10, 2019 +* Added support for 
customizing the xray_config.yaml file using a configmap + +## [0.12.1] - Apr 9, 2019 +* Added Xray server service annotations + +## [0.12.0] - Apr 9, 2019 +* Updated Xray version to 2.8.0 + +## [0.11.2] - Apr 7, 2019 +* Add network policy support + +## [0.11.1] - Mar 26, 2019 +* Add information about upgrading Xray with auto-generated postgres password + +## [0.11.0] - Mar 26, 2019 +* Switched to StatefulSets to preserve micro-service Ids + +## [0.10.5] - Mar 18, 2019 +* Added label selector for Xray ingress + +## [0.10.4] - March 15, 2019 +* Revert securityContext change that was causing issues + +## [0.10.3] - March 13, 2019 +* Move securityContext to container level + +## [0.10.2] - March 12, 2019 +* Updated Xray version to 2.7.3 + +## [0.10.1] - March 10, 2019 +* Updated values.yaml added an important comment for the MongoDB requirements. + +## [0.10.0] - Mar 3, 2019 +* Support loggers sidecars to tail a configured log + +## [0.9.0] - Feb 14, 2019 +* Updated Xray version to 2.7.0 + +## [0.8.5] - Feb 11, 2019 +* Add an option to set the indexAllBuilds configuration option in order to index all the builds in artifactory + +## [0.8.4] - Feb 6, 2019 +* Updated Postgres version to 9.6.11 + +## [0.8.3] - Feb 4, 2019 +* Updated Xray version to 2.6.3 + +## [0.8.2] - Jan 24, 2019 +* Added missing documentation about using `mongodb.enabled=false` when using external MongoDB + +## [0.8.1] - Jan 22, 2019 +* Added support for `common.customInitContainers` to create custom init containers + +## [0.8.0] - Jan 1, 2019 +* Updated Xray version to 2.6.0 + +## [0.7.8] - Dec 18, 2018 +* Fix for 0.7.7 (Improve server health probes to support GKE ingress controller. Fixes https://github.com/jfrog/charts/issues/149) + +## [0.7.7] - Dec 18, 2018 +* Improve server health probes to support GKE ingress controller. Fixes https://github.com/jfrog/charts/issues/149 + +## [0.7.6] - Dec 11, 2018 +* Using secret for external databases. 
Fixes https://github.com/jfrog/charts/issues/73 + +## [0.7.5] - Nov 14, 2018 +* Fix bad example in [README.md]. Fixes https://github.com/jfrog/charts/issues/127. + +## [0.7.4] - Nov 14, 2018 +* Fix indent of `nodeSelector`, `affinity` and `tolerations` in the templates + +## [0.7.3] - Nov 11, 2018 +* Updated Xray version to 2.4.6 + +## [0.7.2] - Nov 4, 2018 +* Replace POSTGRESS_ with POSTGRES_ (remove double S) + +## [0.7.1] - Oct 30, 2018 +* Updated Xray version to 2.4.2 + +## [0.7.0] - Oct 29, 2018 +* Update postgresql chart to version 0.9.5 to be able and use `postgresConfig` options + +## [0.6.3] - Oct 17, 2018 +* Add Apache 2.0 license + +## [0.6.2] - Oct 16, 2018 +* Updated Xray version to 2.4.1 + +## [0.6.1] - Oct 11, 2018 +* Allows ingress default `backend` to be enabled or disabled (defaults to enabled) +* Allows rabbitmq to be used instead of rabbitmq-ha by settings rabbitmq-ha.enabled: false and rabbitmq.enabled: true + +## [0.6.0] - Oct 11, 2018 +* Updated Xray version to 2.4.0 + +## [0.5.6] - Oct 9, 2018 +* Quote ingress hosts to support wildcard names + +## [0.5.5] - Oct 2, 2018 +* Add `helm repo add jfrog https://charts.jfrog.io` to README + +## [0.5.4] - Sep 30, 2018 +* Add pods nodeSelector, affinity and tolerations + +## [0.5.3] - Sep 26, 2018 +* Updated Xray version to 2.3.3 + +## [0.5.1] - Sep 13, 2018 +* Per service replica count + +## [0.5.0] - Sep 3, 2018 +* New RabbitMQ HA helm chart version 1.9.1 +* Updated Xray version to 2.3.2 + +## [0.4.1] - Aug 22, 2018 +* Updated Xray version to 2.3.0 + +## [0.4.0] - Aug 22, 2018 +* Enabled RBAC support +* Added ingress support +* Updated Xray version to 2.2.4 diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/Chart.lock b/stable/jfrog-platform/local_dependancy_charts/xray/Chart.lock new file mode 100644 index 000000000..f0d17b970 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/Chart.lock @@ -0,0 +1,12 @@ +dependencies: +- name: postgresql + repository: 
https://charts.jfrog.io/ + version: 15.5.20 +- name: rabbitmq + repository: https://charts.jfrog.io/ + version: 14.6.6 +- name: catalog + repository: https://charts.jfrog.io/ + version: 101.10.0 +digest: sha256:f5128e32fcfd3e3e9e163779bba2e576c24709ac38f7d58ce3217c114a4ca4c8 +generated: "2025-01-08T14:54:51.409033+05:30" diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/Chart.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/Chart.yaml new file mode 100644 index 000000000..3de3e26ce --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/Chart.yaml @@ -0,0 +1,31 @@ +apiVersion: v2 +appVersion: 3.111.15 +dependencies: +- condition: postgresql.enabled + name: postgresql + repository: https://charts.jfrog.io/ + version: 15.5.20 +- condition: rabbitmq.enabled + name: rabbitmq + repository: https://charts.jfrog.io/ + version: 14.6.6 +- condition: catalog.enabled + name: catalog + repository: https://charts.jfrog.io/ + version: 101.10.0 +description: Universal component scan for security and license inventory and impact + analysis +home: https://www.jfrog.com/xray/ +icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/xray/logo/xray-logo.png +keywords: +- xray +- jfrog +kubeVersion: '>= 1.19.0-0' +maintainers: +- email: installers@jfrog.com + name: Chart Maintainers at JFrog +name: xray +sources: +- https://github.com/jfrog/charts +type: application +version: 103.111.15 diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/LICENSE b/stable/jfrog-platform/local_dependancy_charts/xray/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/README.md b/stable/jfrog-platform/local_dependancy_charts/xray/README.md new file mode 100644 index 000000000..fb953b032 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/README.md @@ -0,0 +1,66 @@ +# JFrog Xray HA on Helm Chart + +**IMPORTANT!** Our Helm Chart docs have moved to our main documentation site. Below you will find the basic instructions for installing and deleting Xray. For all other information, refer to [Installing Xray](https://www.jfrog.com/confluence/display/JFROG/Installing+Xray). 
+ +## Prerequisites Details + +* Kubernetes 1.14+ + +## Chart Details + +This chart will do the following: + +* Optionally deploy PostgreSQL (**NOTE:** For production grade installations it is recommended to use an external PostgreSQL) +* Deploy RabbitMQ (optionally as an HA cluster) +* Deploy JFrog Xray micro-services + +## Requirements + +- A running Kubernetes cluster + - Dynamic storage provisioning enabled + - Default StorageClass set to allow services using the default StorageClass for persistent storage +- A running Artifactory +- [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) installed and setup to use the cluster +- [Helm](https://helm.sh/) v2 or v3 installed + + +## Install JFrog Xray + +### Add the JFrog Helm Repository + +Before installing JFrog helm charts, you will need to add the [JFrog helm repository](https://charts.jfrog.io) to your Helm client. +```bash +helm repo add jfrog https://charts.jfrog.io +helm repo update +``` + +### Install the Chart + +#### Artifactory Connection Details + +To connect Xray to your Artifactory installation, you will need to use a join key. To learn how to retrieve the connection details of your Artifactory installation (join key and JFrog URL) from the UI, see https://www.jfrog.com/confluence/display/JFROG/General+Security+Settings#GeneralSecuritySettings-ViewingtheJoinKey. 
+ +### Initiate Installation +Provide the join key and JFrog URL as parameters to the Xray chart installation: + +```bash +helm upgrade --install xray --set xray.joinKey=<YOUR_PREVIOUSLY_RETRIEVED_JOIN_KEY> \ + --set xray.jfrogUrl=<YOUR_PREVIOUSLY_RETRIEVED_BASE_URL> jfrog/xray --namespace xray --create-namespace +``` + +### Apply Sizing configurations to the Chart +To apply the chart with recommended sizing configurations: +For small configurations: +```bash +helm upgrade --install xray jfrog/xray -f sizing/xray-small.yaml --namespace xray --create-namespace +``` + +## Uninstalling Xray + +**IMPORTANT:** Uninstalling Xray using the commands below will also delete your data volumes and you will lose all of your data. You must back up all this information before deletion. + +To uninstall Xray use the following command. + +```bash +helm uninstall xray --namespace xray && sleep 90 && kubectl delete pvc -l app=xray +``` diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/ci/default-values.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/ci/default-values.yaml new file mode 100644 index 000000000..b25aa81b5 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/ci/default-values.yaml @@ -0,0 +1,90 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. +# If this is an upgrade over an existing Xray 3.x, explicitly pass 'unifiedUpgradeAllowed=true' to upgrade. 
+unifiedUpgradeAllowed: true +databaseUpgradeReady: true +xray: + jfrogUrl: http://rt-artifactory.rt:8082 +common: + persistence: + enabled: false +# To Fix ct tool --reuse-values - PASSWORDS ERROR: you must provide your current passwords when upgrade the release +postgresql: + auth: + # jfrog-ignore + password: "password1" + primary: + persistence: + enabled: false + +rabbitmq: + auth: + username: guest + password: rabbitmqpass + persistence: + enabled: false + +server: + resources: + requests: + memory: "300Mi" + cpu: "100m" + limits: + memory: "4Gi" + cpu: "3" + +analysis: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + +persist: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + +indexer: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "4" + +sbom: + enabled: true + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + +panoramic: + enabled: false + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + + +policyenforcer: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/ci/global-section-values.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/ci/global-section-values.yaml new file mode 100644 index 000000000..072dca5ca --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/ci/global-section-values.yaml @@ -0,0 +1,155 @@ +unifiedUpgradeAllowed: true +databaseUpgradeReady: true +postgresql: + postgresqlPassword: xray + persistence: + enabled: false +rabbitmq: + auth: + username: guest + password: password + persistence: + enabled: false + +common: + persistence: + enabled: false + customInitContainersBegin: | + - name: "custom-init-begin-local" + image: {{ include "xray.getImageInfoByValue" (list . 
"initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + command: + - 'sh' + - '-c' + - 'touch {{ .Values.xray.persistence.mountPath }}/init-begin-local' + volumeMounts: + - mountPath: "{{ .Values.xray.persistence.mountPath }}" + name: data-volume + customVolumes: | + - name: custom-volume-local + emptyDir: + sizeLimit: 100Mi + customVolumeMounts: | + - name: custom-volume-local + mountPath: "/scriptslocal" + customInitContainers: | + - name: "custom-init-local" + image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + command: + - 'sh' + - '-c' + - 'touch {{ .Values.xray.persistence.mountPath }}/init-local' + volumeMounts: + - mountPath: "{{ .Values.xray.persistence.mountPath }}" + name: data-volume + customSidecarContainers: | + - name: "sidecar-list-local" + image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - NET_RAW + command: ["sh","-c","echo 'Sidecar is running in local' >> /scriptslocal/sidecar.txt; cat /scriptslocal/sidecar.txt; while true; do sleep 30; done"] + volumeMounts: + - mountPath: "/scriptslocal" + name: custom-volume-local + resources: + requests: + memory: "32Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + +global: + jfrogUrl: http://rt-artifactory.rt:8082 + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + customInitContainersBegin: | + - name: "custom-init-begin-global" + image: {{ include "xray.getImageInfoByValue" (list . 
"initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + command: + - 'sh' + - '-c' + - 'touch {{ .Values.xray.persistence.mountPath }}/init-begin-global' + volumeMounts: + - mountPath: "{{ .Values.xray.persistence.mountPath }}" + name: data-volume + customVolumes: | + - name: custom-volume-global + emptyDir: + sizeLimit: 100Mi + customVolumeMounts: | + - name: custom-volume-global + mountPath: "/scriptsglobal" + customInitContainers: | + - name: "custom-init-global" + image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + command: + - 'sh' + - '-c' + - 'touch {{ .Values.xray.persistence.mountPath }}/init-global' + volumeMounts: + - mountPath: "{{ .Values.xray.persistence.mountPath }}" + name: data-volume + customSidecarContainers: | + - name: "sidecar-list-global" + image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - NET_RAW + command: ["sh","-c","echo 'Sidecar is running in global' >> /scriptsglobal/sidecar.txt; cat /scriptsglobal/sidecar.txt; while true; do sleep 30; done"] + volumeMounts: + - mountPath: "/scriptsglobal" + name: custom-volume-global + resources: + requests: + memory: "32Mi" + cpu: "50m" + limits: + memory: "128Mi" + cpu: "100m" + +server: + resources: + requests: + memory: "300Mi" + cpu: "100m" + limits: + memory: "4Gi" + cpu: "3" + +analysis: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + +persist: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + +indexer: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "4" diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/ci/test-rabbitmq-haQuorum-values.yaml 
b/stable/jfrog-platform/local_dependancy_charts/xray/ci/test-rabbitmq-haQuorum-values.yaml new file mode 100644 index 000000000..11de8207c --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/ci/test-rabbitmq-haQuorum-values.yaml @@ -0,0 +1,65 @@ +# CI values for Xray +# If this is an upgrade over an existing Xray 3.x, explicitly pass 'unifiedUpgradeAllowed=true' to upgrade. +unifiedUpgradeAllowed: true +databaseUpgradeReady: true +xray: + jfrogUrl: http://rt-artifactory.rt:8082 +common: + persistence: + enabled: false + +postgresql: + postgresqlPassword: xray + persistence: + enabled: false + +rabbitmq: + replicaCount: 3 + auth: + username: guest + password: password + persistence: + enabled: false + podManagementPolicy: Parallel + +global: + xray: + rabbitmq: + haQuorum: + enabled: true + +server: + resources: + requests: + memory: "300Mi" + cpu: "100m" + limits: + memory: "4Gi" + cpu: "3" + +analysis: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + +persist: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + +indexer: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "4" diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/ci/test-rabbitmq-replicaCount-values.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/ci/test-rabbitmq-replicaCount-values.yaml new file mode 100644 index 000000000..f0a970860 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/ci/test-rabbitmq-replicaCount-values.yaml @@ -0,0 +1,58 @@ +# CI values for Xray +# If this is an upgrade over an existing Xray 3.x, explicitly pass 'unifiedUpgradeAllowed=true' to upgrade. 
+unifiedUpgradeAllowed: true +databaseUpgradeReady: true +xray: + jfrogUrl: http://rt-artifactory.rt:8082 +common: + persistence: + enabled: false + +postgresql: + postgresqlPassword: xray + persistence: + enabled: false + +rabbitmq: + replicaCount: 3 + auth: + username: guest + password: password + persistence: + enabled: false + +server: + resources: + requests: + memory: "300Mi" + cpu: "100m" + limits: + memory: "4Gi" + cpu: "3" + +analysis: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + +persist: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + +indexer: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "4" diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/ci/test-values.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/ci/test-values.yaml new file mode 100644 index 000000000..c236816d8 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/ci/test-values.yaml @@ -0,0 +1,175 @@ +# CI values for Xray +# If this is an upgrade over an existing Xray 3.x, explicitly pass 'unifiedUpgradeAllowed=true' to upgrade. 
+unifiedUpgradeAllowed: true +databaseUpgradeReady: true +xray: + jfrogUrl: http://rt-artifactory.rt:8082 + unifiedSecretInstallation: true + openMetrics: + enabled: true + annotations: + key1: "value1" + key2: "value2" + priorityClass: + create: true + +common: + persistence: + enabled: false + fsGroupChangePolicy: "OnRootMismatch" + +postgresql: + postgresqlPassword: xray + persistence: + enabled: false + +rabbitmq: + auth: + username: guest + password: password + persistence: + enabled: false + +initContainers: + resources: + requests: + memory: "64Mi" + cpu: "10m" + limits: + memory: "128Mi" + cpu: "250m" + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 70 +rbac: + create: true +serviceAccount: + create: true + automountServiceAccountToken: true + +## filebeat sidecar +filebeat: + enabled: true + filebeatYml: | + logging.level: info + path.data: {{ .Values.xray.persistence.mountPath }}/log/filebeat + name: xray-filebeat + queue.spool: + file: + permissions: 0760 + filebeat.inputs: + - type: log + enabled: true + close_eof: ${CLOSE:false} + paths: + - {{ .Values.xray.persistence.mountPath }}/log/*.log + fields: + service: "jfxr" + log_type: "xray" + output.file: + path: "/tmp/filebeat" + filename: filebeat + readinessProbe: + exec: + command: + - sh + - -c + - | + #!/usr/bin/env bash -e + curl --fail 127.0.0.1:5066 + +server: + resources: + requests: + memory: "300Mi" + cpu: "100m" + limits: + memory: "4Gi" + cpu: "3" + # Add lifecycle hooks for container + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "echo Hello from the server postStart handler >> /tmp/message"] + preStop: + exec: + command: ["/bin/sh", "-c", "echo Hello from the server postStart handler >> /tmp/message"] + + statefulset: + annotations: + xray: test + +analysis: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + # Add lifecycle hooks for container + lifecycle: + postStart: + exec: + 
command: ["/bin/sh", "-c", "echo Hello from the analysis postStart handler >> /tmp/message"] + preStop: + exec: + command: ["/bin/sh", "-c", "echo Hello from the analysis postStart handler >> /tmp/message"] + + +persist: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "3" + # Add lifecycle hooks for container + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "echo Hello from the persist postStart handler >> /tmp/message"] + preStop: + exec: + command: ["/bin/sh", "-c", "echo Hello from the persist postStart handler >> /tmp/message"] + + +indexer: + resources: + requests: + memory: "300Mi" + cpu: "50m" + limits: + memory: "4Gi" + cpu: "4" + # Add lifecycle hooks for container + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "echo Hello from the indexer postStart handler >> /tmp/message"] + preStop: + exec: + command: ["/bin/sh", "-c", "echo Hello from the indexer postStart handler >> /tmp/message"] + +router: + # Add lifecycle hooks for container + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "echo Hello from the router postStart handler >> /tmp/message"] + preStop: + exec: + command: ["/bin/sh", "-c", "echo Hello from the router postStart handler >> /tmp/message"] +observability: + # Add lifecycle hooks for container + lifecycle: + postStart: + exec: + command: ["/bin/sh", "-c", "echo Hello from the observability postStart handler >> /tmp/message"] + preStop: + exec: + command: ["/bin/sh", "-c", "echo Hello from the observability postStart handler >> /tmp/message"] diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/files/system.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/files/system.yaml new file mode 100644 index 000000000..b9ee79c6b --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/files/system.yaml @@ -0,0 +1,86 @@ +configVersion: 1 +router: + serviceRegistry: + insecure: {{ .Values.router.serviceRegistry.insecure }} +shared: +{{- 
if .Values.xray.openMetrics.enabled }} + metrics: + enabled: true + {{- if .Values.xray.openMetrics.filebeat.enabled }} + filebeat: {{ toYaml .Values.xray.openMetrics.filebeat | nindent 6 }} + {{- end }} +{{- end }} + logging: + consoleLog: + enabled: {{ .Values.xray.consoleLog }} + jfrogUrl: "{{ tpl (required "\n\nxray.jfrogUrl or global.jfrogUrl is required! This allows to connect to Artifactory.\nYou can copy the JFrog URL from Administration > Platform Security > General > Connection details" (include "xray.jfrogUrl" .)) . }}" + database: + {{- if .Values.postgresql.enabled }} + type: "postgresql" + driver: "org.postgresql.Driver" + username: "{{ .Values.postgresql.auth.username }}" + url: "postgres://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.primary.service.ports.postgresql }}/{{ .Values.postgresql.auth.database }}?sslmode=disable" + {{- else }} + type: {{ .Values.database.type }} + driver: {{ .Values.database.driver }} + {{- end }} + {{- if and (not .Values.rabbitmq.enabled) (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + rabbitMq: + {{- if .Values.global.xray.rabbitmq.haQuorum.enabled }} + ha_quorum: true + vhost: {{ .Values.global.xray.rabbitmq.haQuorum.vhost }} + replicasCount: {{ .Values.global.xray.rabbitmq.replicaCount }} + {{- end }} + erlangCookie: + value: "{{ .Values.rabbitmq.external.erlangCookie }}" + {{- if not .Values.rabbitmq.external.secrets }} + url: "{{ tpl .Values.rabbitmq.external.url . 
}}" + username: "{{ .Values.rabbitmq.external.username }}" + password: "{{ .Values.rabbitmq.external.password }}" + {{- end }} + {{- else if and .Values.rabbitmq.enabled .Values.global.xray.rabbitmq.haQuorum.enabled }} + rabbitMq: + ha_quorum: true + vhost: {{ .Values.global.xray.rabbitmq.haQuorum.vhost }} + replicasCount: {{ .Values.rabbitmq.replicaCount }} + {{- end }} + {{- if .Values.xray.mongoUrl }} + mongo: + url: "{{ .Values.xray.mongoUrl }}" + username: "{{ .Values.xray.mongoUsername }}" + password: "{{ .Values.xray.mongoPassword }}" + {{- end }} +{{- if or .Values.server.mailServer .Values.server.indexAllBuilds .Values.global.xray.rabbitmq.migrateMessagesFromXrayDefaultVhost .Values.global.xray.rabbitmq.migrateMessagesFromOtherRabbitMq }} +server: + {{- if .Values.server.mailServer }} + mailServer: "{{ .Values.server.mailServer }}" + {{- end }} + {{- if .Values.server.indexAllBuilds }} + indexAllBuilds: {{ .Values.server.indexAllBuilds }} + {{- end }} + {{- if .Values.global.xray.rabbitmq.migrateMessagesFromXrayDefaultVhost }} + dataMigrations: + migrate_msgs_from_other_rabbitmq: + vhost: {{ .Values.global.xray.rabbitmq.vhost | default "%2f" | quote }} + {{- else if .Values.global.xray.rabbitmq.migrateMessagesFromOtherRabbitMq }} + dataMigrations: + migrate_msgs_from_other_rabbitmq: {{ toYaml .Values.global.xray.rabbitmq.migrateMessagesFromOtherRabbitMq | nindent 6 }} + {{- end }} +{{- end }} +{{- if (include "xray.imagePullSecretsStrList" .) }} +executionService: + pullSecret: + {{- include "xray.imagePullSecretsStrList" . | indent 4 }} +{{- end }} +contextualAnalysis: + registry: {{ include "xray.getRegistryByService" (list . "contextualAnalysis") }} + image: {{ .Values.contextualAnalysis.image.repository }} +exposures: + container: + registry: {{ include "xray.getRegistryByService" (list . 
"exposures") }} + image: {{ .Values.exposures.image.repository }} +{{- if .Values.jas.healthcheck.enabled }} +jas: + healthCheckApi: + enabled: true +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/logo/xray-logo.png b/stable/jfrog-platform/local_dependancy_charts/xray/logo/xray-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..23ed35bf5286f568c3e8b671a6e9c32fb30272d8 GIT binary patch literal 11735 zcmd^_RZtvHw8oKTkw91+f-M9IzPP))yL)g55FkikvBfobfDqhWH@I6M1a~L6yIlTn z_xZlwhneZBZ|a=x{?1fQ_4!4psmNksyu(01K){lhlhSy74*qwc0blPGdX?D-2w((x zDRC`tlOumSZ?e9IYu{2|nKmaMWX%3@bTh(W#P<$0DGADG@d)VgOCi0kF5TAXOEky1WHuU?&)>Qn<&Q>lB zpm>`6`)S;F|ERQ5!1JxR=sU!iP6`}wOpJ`!`)XSNm-yx01ll1AqB2NWhzltL`~~wP z08W)_3vy}xXYd|?s2q=D@v)+i_w`NqdZ9xM=yf0i`_YiG0-6Avg=JQ9OS}$9sKRFK zzetMV^^!Wb&Y+2l;aPWv1`GqXr zLNYcUc~Zf0l?=3J2%Z!Ya51>JY+&u}%Q0TcBPph*c_)(8Z%`$pB zg8&(C#RqU)$vBn4P*`0FKH;V^CLDc@0P@^lEegYEu~`KA!5^1SukkSw8;tFshkYKX zqijQ&93nTJf%vkuAlV={%)m`CzG$%^$6A5lbTLZE#ev!5ckqiH`rVGrhF7YtUG3pe zN(Q)%nv=3Wx_kKPlRFYEra*e9X2hNxgf7_33XkEMx(vnaeeg?PG%w990iCb>L;nr| zAJH{ra*~LfOZ;=^U~XjLJ}&i7U}ps2o;i40gYh>}K4(47?oUcMVMr6~jzH0MWEkrg zOWF9*-iTU)CgTi@UT!Fg)^bd2C0&Bp7Ksm9nz$>UqJT|9`nRobi<%LDBx-{CXm8%;Q6B?dM^ zkFO5N3?O~Rh@Xy%SMVk}(U$l+qhU0pE=T6gH(ybA85|-JY$ck^47Q zAgzTjBvpMTjq8EKt~{RXRz7nBh=nAbH$*}PsckalpT4^O>&8S%5b)+gcndNUP!ul& z`B_(;V72dbWxIqc;Ybd!p!m=&KCL>g(&;Sg45j2 zqr2g7c#S%spENomzMfLHMm+E}R+$#E1NFt=#MG6iOQJADpaZEeK&T0c6 zV3XHngLC{_>p$k?!P)Tk$dGI>+trg^_ymQ(+>-FOg}UDk!f5=^h8(bZOekf%-`zNgJ%$XSF&{mzn$uH;EqN7=+9n=?~@wofwlJd?-VqMZksE?VF z9rNMWaZ?*=?XyZBT*3YR^4Ls0_hTYv+Ai{?ClYx|`8;nUehk!`-*D!r!sY^x!RQN} zQhzzsV00*_%=Q+(F{_+WSXZXc&=9b&-`>#T3PX3lV|1kcD*(owQ|Fv8dP84pOuDJ{Mvj-0 zkzvQ<68_SIb0wD|4<E~n}d8zmgY*&U&tu%7dOHj3Izi?&Co9K zGs%~&5ficzJRSI_{KeB-Q$94rthLNi%%aJHt+nhF4qj-7ieMB;eq3W8us#vk4 z!p>hApqg?MIM7=fB=Rz&14PE?%y`VqF4FW+b6lj5&Yx?pH@68N7KWDGEgX`ysJQXnF8?$x=}6Su5MDc(p)5Hiw%+mQLVnj 
zK7WlG9j1>o0WO-`d}-wVdm6T?m@>RGEWr>wz;Uq~dJdx#=r#TB%@j3x4|^IQK(cC3~P%7nUu zW_3@wgZHQ*g+YTvb5|#qTNXyhvOtB&qdR`%tx8h`jrj zl3GJB20gB%D>HhJt{ce?^Rd`d=Rj6+I58gyooQ3MGX3}OEZo7N7b^5KZD5&0RluNy z;tru8ql^BpnPWYE-j+6M0+t_}p)AK%r_2pm=klmk@d2eKj`P#1(WaGyz87KIoVv8M z8bg<+%Gmg18h%{pk07a{L~M$;?DH%TmPY;O-3_(i1ta;p&4FAGdahs<11(RjfC;~N zG3#T-udN=3LXsTb5kYMS7Ti5jT(@EcMmgiS~2;vwkf}zqObid?@Gvx zhxQM1S9NwMAbpReT^=kr}l13FkR-xf;oj3g{a1uayqMN(N%x6X3kP z>QTe=2==L=Sz6XHUfq+;^mVd)PK_BxH*SdE5C@e=)|dMbKWc>JnVuA5RLK=ia1v=# zIAsJ;;;8d?Zu(OXeNH(ac3QsPtolx{lBjg&OO*l4xvvw_VE)BnyW{(8L~3e>T?kaTLm zQoZ$$gwy|3Jdi8`V4evgShCG(_o%hpnE1%F+Wp<_5hB_#iMV&Cdu)e!I zQP_*P8+{7W7n#tRy0xkOOy`A`?g35a(N^Z3gQo?)5&NLv+G=f6ZZjyVxD!qk{%%zT z&57t6lL6yfdZyi3DXsOq1(T@bE$qfHIj1oT!8cOCO{FL@xoDudP#QhXozHGolibeg z73XR#^WWwnHvj9tv0ub@cHX-+Zz_eR&%exvT=Us6Uq&8tf^kN`ZCGf+N~!03%Nvj zeEh-RE92k^yohcLU|x0FCvXgPaIqfrhA8Acx(8Wc2|~%kb;PfwG+jk17st@22Q9zb zQjpS52DK3$Rzk368~cyCye>)U`Cd@dRr%cIr`pDIJY*l$_W=}A|3(+^kmyXY3qgbI z#k=8M{$@g`DIIewO$Buq(JG!vY~0wvHr(NrfG8iZ%15Bp&JCJ(qB(COWc8$RYhipL ztU{7856Yc9tu$z!Aq9;M>m-RU2AY@8lK7m03D`LGSDnh7ohdbdlo(3JBHVmmoi zakw%JMo`7?B{~F>jO48u0(CsN>4RyW@vJDM^zA+tnoo@NY9*d#LaPxQAHR=1-LR~{ zy*)WpLT_vy{t%TFW*}ZPfiqN2vuJGb(kSc9UnW84N3Pc1WwQ+zmmRO4rYBKM9gDjF z`aLDJmiPDRc7N``EPlIt9ydxU`FWfc+NT}Z1d$zuXGgHPRM_jSeXBR2BigpmN)v4b zLMUiHQSbW(kTv;i#5r!f7#U4ZthM}D*1G%#{x((@XSO2^5P+ytrTfJ6oj+cySsLa! 
z`Ba!r^0j4jdf`rbG&y0ec#8Sr&n#J;5RTawGtisE*S^!7uLtaqri8tlZJFkrJST`^ zq)SS_fQzD5F1ZAE{MA`L^~?Ko_mCh$Y_sDnb1H8^KMcDGXYY)!QtN%RzjbFTWZ?}} z==eO?-0cfHb!t*X2q%-6(OEOdl}y)Ik880=1H+Q zo7Qu<^^HHPH2uR%VS4KmF9S$emDq!6t^B}vE|nLWN4rkb_Tz92|J&F%OT7$Jst-h8 z=z&4>C&zPle|$&J2O<)CQ378oGbCdxeo9L9k0B^uR7MZm^uAaGg zD4E+0ia}R;6vFX0U9NDufG$`Qs{b%wMHw-~ql(~#a`iCRIjC^UJt6=ILOKuD)3Ht+ z8k4=QIDUa<(*u7a+k1^^bLr@UqE}m6boTRElFpA~9x8qX-2KH5!CZLrmh*tI2qolB z6D?!grldQ1xADeVfURIxsco|73?ti7>s|LP1u=mX+86c*Ge-V^s0f#NTmn_!7QD@D zu;4gvZy84j%fJJUtF7}K2XM3#lzOUz(QtKk5s%ow+n;Uv3A_f?D#k|3 zOZ--p@~mrqmq6@xAku7WG+Nzq?4>^)iLU2zX}QjaJL8`!gke6$qj4jo@^k24?-^mv z^ANWyi6%C-SwKlKHo0VopgfH6_m&!!BOs--vC5GA&TKw%N{0r%xHf~}%P6|1lPH>M zwnXWYS%ZC%|aAEWj*z>I7;F*LWMBx=HtqXAD=-DA|3f@zKHrf477bGUv$+va(_44hft*mma z6bhV&M}T+w9lXCKMh}Xx8BO16!95d=Rr(qd$-LtOU7Fpvo8~hXb*A!#QaE?`1I+J2 z^0KmgQ6j47z{#y;FE?n?U1bk5LZR#p~{_jgSQYBvMF0^E!7&bM(IcRl`cmF!WI zr0RIv7$4OK%Eu*a;uG)ooExM2m^&zII%IKcXq4x%dywK7%djH-Wxx2z!hJ=QFC|9w zosa$mr4Jhqo+R~MmEG+kXIQhb*P-efar`h+-#3$GtI7GODAZ1!bW z@-aSQHqR4TN$p%ak330A2wLPYKWeYD-&X$d`wiVTdalM~-$OZ??kqWl%eq#ICa;D* zFqw4fYp+Aj@(?|VSHsL-WAfi`1X!UT*hbF|=vbm!41K$-0wXdTudcpsPIHC3ktp2O z?x+n~sf%j0kasXaIQnQPCb6A+nY=2stQ1}A=_tf1=7zJC+`o_aY3Q<9u!phfb6K)~D%)EY zm3fq+AHd(vUv1TV+&?xVp5;Z|?<2c9D=I(zWZlm%-zzU%82Ap4Va@AzoNE@Kr=2_! 
z&QqHK3G{+C3_6E=P9kW`d?cF-#r-)BuOgzr+c7^zFmXGGOKVhLDhI`*@){r2WsBDv zw7x{OrcV3V{wXiq*Nhkqb(^KcWc=O&uq!u4$g5j!-ml7Uy3o<77;P5&Be-Z~V|n!h z5%=W{j131a<+auC>goUSgW!O4d}P?fkp#>k%-)f=gUvSR#V%cYcF)FPOj7h&%Un&r z*(P=<*4EKaH4HC5&+gjv8(wZEiPbi$%Wg$h<~%^XKYd!w9XF&|x9`OL=W4F`o$31i z{rhl^PksP9%rQI~XR^{7rXwJ6oqiA%FZMvbB?F>fc(V20Reqa*lc;Mg9<6OQM&7zahu+smgXUHf zYeW^l2<=HV_iRvqPPqM{CV(VbgsltQt@tIW{ca+~R0Ss6d1^KI%0WJ|9TRgJ!{ML6OLGD>u0PqJ*}dEHtL7g;yW@^A%Q*Ilhhk(A2^}M2exr zfRlG{UPug`mo}otTU5*TC z<-jbB)|CVDC=OQDYZv{F^75+H;R@6GsO7oN#Aaelvw1y}A>gSibU|TDHj|u?8;-cX zOrCw0lc2n)8cqL8!iWc`$B zP-Cg3S=?=y<`T(f5cE{hSzF2rNAsx6ylRoIwRh8&)JjroQDka@2XwHx6z*O|~go=*uJi|8m;>va`%2dnZaMkWiMLcUEg z84Msju}%P+Q}4cq;h*+Km(i4PLDj?P{wb9ufO<@cL&B)|HsrG+wRpsLTJ#ahYXzYU zhLx(Ez`oqlor!eJdrqT}Gu;T&{IQ~@(0BWPgrk(Iuwz(hJlCUGJ?ms_PYv3dTM9?zX63H?bRv7*ffMsAx)fnh-GcNHoUX0#yC>w6)E5@v-LhRBXOz7vC2hQXpjCt zwCOyL=wFdG5j(rd4IjlrgTh*$>&bXON|@G4Tr*|LbE3QBE?73)vg$zv$-^nhgjbRc*5^zb6hjTf;Jn$c&` zAJ9=yxQ_Tc&AVK0#Xv+{kH6{l7_Dig(%!d8S8umHE3Y2OSJQ1`79$nWBS0d7h(s7O zfZ9UZJXXEHQIN`t)YiYkLSa%IVcB!CugNyCOmM-?4y zv#8f2W$~;Ox*}hA(h$0DL|NH-jQ9$NFA-yIJb77K6w!s8)0I!s}$lI z+|dSTu-EqcI%>8oHIl?`|3F>PK|k0ido4SZZ9 zpst~DUAqdP+`J+LJ3rsILz1vAELEb3i*8*<@0S?5_Kes8$)|xah`$NjdnDXh;%+2L z1!_2WQ=3>SKIXD$z35Rp-KjU#SAI^k<7=b0ZFO>iTz>ebug>>WmLJ*DFVYot z2D(1_I!DY^Yyfqv(nXWR7+;iVmjUF#?BQM{@TJ&BV~W~SMMV9!?6Q8$j~C!a3LIsR zMU&BBv%qxHx!#w?ar@iwMV@P&_B;j;o|@()G)CRLe-%#7(FV6j6Y^ebHO>_2{K+=jJE3onz4k(VyyO6u5#}66xZIXFe&!}et%ytNQc>> zJfK}6(0(av`cS6OdFVWY$6Pool18BIx@w#cy7{Jxx{wkImsVezEPThtJXzmo;uV{t zQj?8(nhngxpk%N<^@ZMUNQu;DmHeSX=jJfveM4o6u1m{%a+mGFSb|NSWC=sTeEnZO z@ZiN_%!BBmqzCA9KNB(634&nPZ?`ZprJiwW1fm4L$8%}2PeWu*CC* zDU=9zL{`sD(+1REK$=@GVfVNf>SAJN&<$*Z^_$wh4wjJVQS_(W3hSQc_ z#_L{iljl4%o=Qt>0+;A~@_#hx(AR8>jD){ytbc|7pTZw%f*hmojr5 zHeked0e>UD&6aW!B{t3vjEF+?{;VBW!Acz6fT>}*A>Ks4byk|d1^s|%K~Z2zO{mM~ zR)}k2n@2ht#hilO;Kj{T@P$dQ&xY{>nJ+K9KhtD^4jg#iMux{D$53^!P=9?>f-#zF z!rF*WUu@RNB4cb|g;gU4vgS6%@3pZcitGsQI 
zdJ-?ZA}(DD{H@N0-sZv8$8EQ$wGuGrBF>yRDT*6*sxc%2B3ia_5>m{CS=?SPJbvA|NoU7lW9*&X`|=>e)G<5%)#d6l zqYNA7EOmTLeE%NE-aFo#cHx^nNok@2~wzn|ku*3;?T{nMK~S)Y;{~hUnHw^dW3#$|+AvxUTBW-_-l| z(F3MzX6iJ>{LeQ_ZCi&woT7d_p5G3XVIE*JO?k*=@j&@08GhM3B4-^ldiN}(5~e2~`2 zVjEO*%ZfcaaoW0lUtIs^4bA!xMI@`@x08f(*H{_1dK|Q5+$nT3vbU!{5Mplbam`#Q zgLF7=h2BS?7(748r0>7;hDh^XJm)QB)#f?ndL52rrln5lUN;Mb|_$I%4 zk(w|U_*IAhYf*~yNKyyPn=eJSF7%k6>PE;F_+F6FIL>zAx~r?X3^%2f1Toxe&Guih z6@7^*gDOx0qMLvpv(NxC!OihbI%P8@$D>1?&CsZT&X z@L}*p)Z!b~%u!FKV@gLg{q6a#rE5`~H=PU{-VF3Ck>yS|%0cts z`8(|Fx#HMtX+eSdj0X0K@}zcsJvfQWZn&&JEls?iI1>*#5j=|+a;x!bL#&=XCeMLP zT&Oo5FG1p`(n~A6o6?HUNtEN$B(&~5?0$S06_rKc1x-?!;4dF=iKZ2H0h?txQs%9e zn<`&^N|$^NQPQgFird>z#EOsfb8fc#9l@sLCn!(x9#G<~g@VcRXr#j6qjf2h(%e() z`&@5zorqPk5a1OoJu?b!V=QP4#vVr0bK#sRtw#_~}8jul%HE%CvEd zb!W6Zv%30K#Z^Gz(cXy1x7U{axE4N=BP%=r+zDY4ZB8UH-bgIgHag=Eu*z5zPfbBl zd7rG@H_=y#r(|=_Kb=TlFaUXr-7M=X9w)y~l-Kn=8*IDsO=)pO=VisFBW}ZSZ(|7N zpUnndJ31nb-!BVmW5I$Tw?w|~WOm@#Nmy zz0|8K>>a^A_;>wzCGzGUDX0ME^lwlHwu!viUE+Cx!d9<{I6rP_b_2kzUZ;}Ln%fow zmnd-WmGV(Ui{WD&$fACapv6<70{X@1bXRE}IgO0G?7*>&r9Y`BoHW=>szm?r>w$|) z2_8x^BKGJ_)3iJ3(ZBf4VCGlqpfeP6UxqejItfZ6f?en?rnQk8?kcqQRwiU?8t{c^ z&5L%^A4w!R1Un{ZZ{NAh*T2;H3w_BKWy6g{vyjHb$xYKC*j6TyFU zloWAcu(~m=*Rbh&Uby@C=-=T1+3jD%c_CQhPlZp1HFtO76d$S+1Q!rw-URp8eu7%H zgg&6YyGa?bl#v`0X|Y;D7P|{6;j@oq@kIrCX4oA;jYNU)lQiTAd>L4x>Qjz{k633=sSo}zppf~Ux3W&! 
z0i$|IEHH=+ajWzM9Z1CI=8!IGF+|ibb1ZD|Q^8OMSd#2Tp+SpXQq*cxOTwIEk@)qW z?YxI=pUoy1pj` zEQp}a$}~qI1OY*+th(>=SK50v)zMKzS2dO|H`EUNzJZGQs#VE@jJ6b4Ipr|R&AM5T z5_SkuXR~-=rn*G`$R(Bs%m<-p|J5Z!OmN3REb1ECOoeOS{lJVLJRv+( z6g_@f<2`Hb)#A}H?YH_6x7zGqW|3gYgZ${1T<)8q6_$$4ZAqJ_SvXA|0l4c0s+6*w z-hms0nXjXK%ITb%2EJ}yoYZiCvA!)oP4Ql={8)~K_k%jt$#grjF@2gpj`oCC$w6Hx zwRGd#d&A+Fk5IpJ@ebGiVzb<09b!JLTua1^M9drK4pfvnKSLmpaglLhrhl=qu8ek% zfm&k))z{mb3Hf_sHP~W=KJ~Se(zp2R%2b42n2m_2DNBM`ha>gN$+*i=+znSZF#_okUn4ldU>%%G}2naaC@{#MrS1K>NL`%L8vYR-wn z!25S~a0uZyDU<@(1OMmAL#V{mJ2pLHfB%`r@u{9nnLo!`a4|b*Z`5Ds;Sx|O>edy( zkX-c#MynrGTjod`eBzA!nB-m2k`al%kfHcEkbY`xFL@zAiO5S^IllQvkLUPBDoRO1aNw5z8#R!ubVwXOM-_Cw;bxI+T8ecWa0h%maCE*FZiEC-#L zXP)I->5iwAYt)?!eR5$Dwzf0)^QSLJZiZ+QFa*FG59T}E>Os1bX}5D}u)~*`Cx0VA zt%pG04GZ`a9!Mv5Up*^qi}=cZ>YLH9Slqwjobr^{t-H)(Vt6!fqVuJdC||XIJtj9< zC}zMz|YvLiK_jWi_{~ssC1~}88 z_kXva&5iUDi_nHhbKE8JH2?4J?yisM9f;dmV8WUkDLn)M@2edCFFGTF<2c#MTXYS| zcg7!I`GaOic9kn06Dt zo)2Y zaaFI{Ek=>EWAkh2a4a#GUY!xVLdB-*e`B!zgObBCr5i-rsdLdcn6g@H*24nn23_ll zX9jd%HNQ|zUY@1@mg-D6eSaJI3hq|9?)Qh0{/dev/null + exit_status=$? 
+ if [[ $exit_status -eq 0 ]]; then + ready=true + echo "catalogdb database is available" + exit 0 + fi + + psql -h {{ .Release.Name }}-postgresql --username "{{ .Values.postgresql.auth.username }}" -d {{ .Values.postgresql.auth.database }} -c "CREATE DATABASE catalogdb;" -c "GRANT ALL PRIVILEGES ON DATABASE catalogdb TO {{ .Values.postgresql.auth.username }};" + env: + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: "{{ .Release.Name }}-postgresql" + key: password +{{- end -}} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/filebeat-configmap.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/filebeat-configmap.yaml new file mode 100644 index 000000000..882919880 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/filebeat-configmap.yaml @@ -0,0 +1,15 @@ +{{- if .Values.filebeat.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "xray.fullname" . }}-filebeat-config + labels: + app: {{ template "xray.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + filebeat.yml: | +{{ tpl .Values.filebeat.filebeatYml . | indent 4 }} +{{- end -}} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/keys-warnings.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/keys-warnings.yaml new file mode 100644 index 000000000..4b267e0db --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/keys-warnings.yaml @@ -0,0 +1,15 @@ +{{- if and (not .Release.IsUpgrade) (eq .Values.rabbitmq.auth.password "password") .Values.rabbitmq.enabled }} +{{- fail "\n\n**************************************\nSTOP! 
INSTALLATION not allowed with the default RabbitMQ password!\nPlease change the RabbitMQ password from the default value before proceeding with the installation.\n**************************************\n" }} +{{- end }} + +{{- if .Values.postgresql.enabled }} + {{- if .Values.postgresql.postgresqlPassword }} + {{- fail "\n\nThe key 'postgresql.postgresqlPassword' is not supported in the latest packaged version of Postgres in this chart.\nUse the new key 'postgresql.auth.password' to proceed with installation.\n" }} + {{- end }} + {{- if .Values.postgresql.postgresqlExtendedConf }} + {{- fail "\n\nThe key 'postgresql.postgresqlExtendedConf' is not supported in the latest packaged version of postgres in this chart.\nUse the new key 'postgresql.primary.extendedConfiguration' to proceed with installation.\n" }} + {{- end }} + {{- if .Values.global.postgresqlPassword }} + {{- fail "\n\nThe key 'global.postgresqlPassword' is not supported in the latest packaged version of postgres in this chart.\nUse the new key 'global.auth.password' to proceed with installation.\n" }} + {{- end }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/logger-configmap.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/logger-configmap.yaml new file mode 100644 index 000000000..6c9f23dcd --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/logger-configmap.yaml @@ -0,0 +1,63 @@ +{{- if .Values.xray.loggers }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "xray.fullname" . }}-logger + labels: + app: {{ template "xray.fullname" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} +data: + tail-log.sh: | + #!/bin/sh + + LOG_DIR=$1 + LOG_NAME=$2 + PID= + + # Wait for log dir to appear + while [ ! 
-d ${LOG_DIR} ]; do + sleep 1 + done + + cd ${LOG_DIR} + + LOG_PREFIX=$(echo ${LOG_NAME} | sed 's/.log$//g') + + # Find the log to tail + LOG_FILE=$(ls -1t ./${LOG_PREFIX}.log 2>/dev/null) + + # Wait for the log file + while [ -z "${LOG_FILE}" ]; do + sleep 1 + LOG_FILE=$(ls -1t ./${LOG_PREFIX}.log 2>/dev/null) + done + + echo "Log file ${LOG_FILE} is ready!" + + # Get inode number + INODE_ID=$(ls -i ${LOG_FILE}) + + # echo "Tailing ${LOG_FILE}" + tail -F ${LOG_FILE} & + PID=$! + + # Loop forever to see if a new log was created + while true; do + # Check inode number + NEW_INODE_ID=$(ls -i ${LOG_FILE}) + + # If inode number changed, this means log was rotated and need to start a new tail + if [ "${INODE_ID}" != "${NEW_INODE_ID}" ]; then + kill -9 ${PID} 2>/dev/null + INODE_ID="${NEW_INODE_ID}" + + # Start a new tail + tail -F ${LOG_FILE} & + PID=$! + fi + sleep 1 + done + +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/migration-hook.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/migration-hook.yaml new file mode 100644 index 000000000..3f4eab618 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/migration-hook.yaml @@ -0,0 +1,197 @@ +{{- if .Values.rabbitmq.enabled }} +{{- if and (not .Values.rabbitmq.migration.enabled) (not .Values.rabbitmq.rabbitmqUpgradeReady) }} + {{- fail "Rabbitmq migration flag is disabled. Please enable the rabbitmq.rabbitmqUpgradeReady flag after manually enabling the feature flags in rabbitmq" }} +{{- end }} +{{- if eq (include "xray.rabbitmq.migration.isHookRegistered" .) "true" }} +{{- if .Values.rabbitmq.migration.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "xray.rabbitmq.migration.serviceAccountName" . 
}} + annotations: + helm.sh/hook: "pre-upgrade" + helm.sh/hook-weight: "-10" +{{- with .Values.rabbitmq.migration.serviceAccount.annotations }} +{{ toYaml . | indent 8 }} +{{- end }} +automountServiceAccountToken: {{ .Values.rabbitmq.migration.serviceAccount.automountServiceAccountToken }} +{{- end }} +{{- end }} +{{- end }} +--- +{{- if .Values.rabbitmq.enabled }} +{{- if eq (include "xray.rabbitmq.migration.isHookRegistered" .) "true" }} +{{- if .Values.rabbitmq.migration.serviceAccount.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "xray.rabbitmq.migration.fullname" . }} + annotations: + helm.sh/hook: "pre-upgrade" + helm.sh/hook-weight: "-10" +rules: +{{ toYaml .Values.rabbitmq.migration.serviceAccount.rbac.role.rules }} +{{- end }} +{{- end }} +{{- end }} +--- +{{- if .Values.rabbitmq.enabled }} +{{- if eq (include "xray.rabbitmq.migration.isHookRegistered" .) "true" }} +{{- if .Values.rabbitmq.migration.serviceAccount.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "xray.rabbitmq.migration.fullname" . }} + annotations: + helm.sh/hook: "pre-upgrade" + helm.sh/hook-weight: "-10" +subjects: + - kind: ServiceAccount + name: {{ template "xray.rabbitmq.migration.serviceAccountName" . }} +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ template "xray.rabbitmq.migration.fullname" . }} +{{- end }} +{{- end }} +{{- end }} +--- +{{- if .Values.rabbitmq.enabled }} +{{- if eq (include "xray.rabbitmq.migration.isHookRegistered" .) 
"true" }} +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "xray.fullname" . }}-pre-upgrade-hook + annotations: + "helm.sh/hook": "pre-upgrade" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + template: + metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + spec: + {{- if .Values.rabbitmq.podSecurityContext.enabled }} + securityContext: {{- omit .Values.rabbitmq.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} + {{- include "xray.imagePullSecrets" . | indent 6 }} + {{- end }} + serviceAccountName: {{ template "xray.rabbitmq.migration.serviceAccountName" . }} + containers: + - name: pre-upgrade-container + image: "{{ include "xray.getRegistryByService" (list . "migrationHook") }}/{{ .Values.rabbitmq.migration.image.repository }}:{{ .Values.rabbitmq.migration.image.tag }}" + imagePullPolicy: IfNotPresent + {{- if .Values.rabbitmq.resources }} + resources: + {{- toYaml .Values.rabbitmq.resources | nindent 12 }} + {{- end }} + {{- if .Values.rabbitmq.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.rabbitmq.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - bash + - -c + - | + #!/bin/bash + rabbitMqZeroPodName="{{ .Release.Name }}-{{ template "rabbitmq.name" . 
}}-0" + rabbitMqZeroPodStatus=$(kubectl get pods $rabbitMqZeroPodName -n {{ .Release.Namespace }} -o jsonpath='{..status.conditions[?(@.type=="Ready")].status}') + + {{- if and .Values.global.xray.rabbitmq.haQuorum.enabled .Values.rabbitmq.migration.removeHaPolicyOnMigrationToHaQuorum.enabled }} + for (( i=1; i<=6; i++ )) + do + if [ "$rabbitMqZeroPodStatus" = "True" ]; then + break + fi + echo "Waiting for Rabbitmq zero pod $rabbitMqZeroPodName to be in Ready state - iteration $i" + sleep 5 + rabbitMqZeroPodStatus=$(kubectl get pods $rabbitMqZeroPodName -n {{ .Release.Namespace }} -o jsonpath='{..status.conditions[?(@.type=="Ready")].status}') + done + if [ "$rabbitMqZeroPodStatus" != "True" ]; then + echo "Rabbitmq zero pod $rabbitMqZeroPodName is not in Ready state. Failed to remove mirroring policy 'ha-all'" + exit 1 + fi + policyExists=$(kubectl exec -i $rabbitMqZeroPodName -n {{ .Release.Namespace }} -- bash -c "rabbitmqctl list_policies --formatter json | grep -o "'"\"name\":\"ha-all\""'" | wc -l | tr -d '[:space:]'") + if [ "$?" -ne 0 ]; then + echo "Failed to check if policy ha-all exists on default vhost" + exit 1 + fi + echo "Policy ha-all exists: $policyExists" + if [ $policyExists -gt 0 ]; then + kubectl exec -i $rabbitMqZeroPodName -n {{ .Release.Namespace }} -- rabbitmqctl clear_policy ha-all + if [ "$?" -ne 0 ]; then + echo "Failed to delete policy ha-all on default vhost" + exit 1 + else + echo "Deleted ha-all policy successfully on default vhost" + fi + fi + {{- end }} + + {{- if .Values.rabbitmq.migration.enabled }} + if [ "$rabbitMqZeroPodStatus" = "True" ]; then + kubectl exec -i $rabbitMqZeroPodName -n {{ .Release.Namespace }} -- rabbitmqctl enable_feature_flag all + if [ "$?" -ne 0 ]; then + echo "Failed to perform the migration. Please make sure to enable the feature flag in rabbitmq manually [rabbitmqctl enable_feature_flag all] " + exit 1 + else + echo Feature flags executed successfully! 
+ fi + else + echo "Rabbitmq zero pod is not in running state. Ignoring feature flag migration for rabbitmq" + fi + {{- end }} + + {{- if .Values.rabbitmq.migration.deleteStatefulSetToAllowFieldUpdate.enabled }} + if [ -n "{{ .Values.rabbitmq.podManagementPolicy }}" ]; then + rabbitMqStatefulSetName=$(kubectl get statefulsets -n {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "rabbitmq.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o=jsonpath='{.items[0].metadata.name}') + if [ $? -ne 0 ]; then + echo "Failed to get current rabbitmq statefulset name" + exit 1 + fi + currPodManagementPolicy=$(kubectl get statefulset $rabbitMqStatefulSetName -n {{ .Release.Namespace }} -o=jsonpath='{.spec.podManagementPolicy}') + if [ $? -ne 0 ]; then + echo "Failed to get current pod management policy definition" + exit 1 + fi + if [ "$currPodManagementPolicy" != "{{ .Values.rabbitmq.podManagementPolicy }}" ]; then + kubectl delete statefulset $rabbitMqStatefulSetName --cascade=orphan -n {{ .Release.Namespace }} + if [ $? 
-ne 0 ]; then + echo "Failed to delete statefulset $rabbitMqStatefulSetName to allow update of podManagementDefinition field: [kubectl delete statefulset STATEFULSET_NAME --cascade=orphan]" + exit 1 + fi + echo "Deleted statefulset $rabbitMqStatefulSetName successfully" + else + echo "Field podManagementPolicy of statefulset $rabbitMqStatefulSetName has not changed" + fi + else + echo "rabbitmq.podManagementPolicy is not set" + fi + {{- end }} + restartPolicy: Never + terminationGracePeriodSeconds: 0 +{{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-configmaps.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-configmaps.yaml new file mode 100644 index 000000000..b74915e85 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-configmaps.yaml @@ -0,0 +1,13 @@ +{{- if .Values.common.configMaps }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "xray.fullname" . }}-configmaps + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{ tpl .Values.common.configMaps . 
| indent 2 }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-custom-secrets.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-custom-secrets.yaml new file mode 100644 index 000000000..a4ab543f1 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-custom-secrets.yaml @@ -0,0 +1,19 @@ +{{- if and (.Values.common.customSecrets) (not .Values.xray.unifiedSecretInstallation) }} +{{- range .Values.common.customSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "xray.fullname" $ }}-{{ .name }} + labels: + app: "{{ template "xray.name" $ }}" + chart: "{{ template "xray.chart" $ }}" + component: "{{ $.Values.xray.name }}" + heritage: {{ $.Release.Service | quote }} + release: {{ $.Release.Name | quote }} +type: Opaque +stringData: + {{ .key }}: | +{{ .data | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-database-secrets.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-database-secrets.yaml new file mode 100644 index 000000000..923dac6f9 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-database-secrets.yaml @@ -0,0 +1,27 @@ +{{- if and (not .Values.database.secrets) (not .Values.postgresql.enabled) (not .Values.xray.unifiedSecretInstallation) }} +{{- if or .Values.database.url .Values.database.user .Values.database.password .Values.database.actualUsername }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "xray.fullname" . }}-database-creds + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +data: + {{- with .Values.database.url }} + db-url: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.user }} + db-user: {{ tpl . 
$| b64enc | quote }} + {{- end }} + {{- with .Values.database.password }} + db-password: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- if .Values.database.actualUsername }} + db-actualUsername: {{ .Values.database.actualUsername | b64enc | quote }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa-ipa.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa-ipa.yaml new file mode 100644 index 000000000..f3438a206 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa-ipa.yaml @@ -0,0 +1,31 @@ +{{- if and (not .Values.splitXraytoSeparateDeployments.gradualUpgrade) .Values.splitXraytoSeparateDeployments.enabled }} +{{- if and (.Values.autoscalingIpa.enabled) (eq .Values.autoscalingIpa.keda.enabled false) }} + {{- if semverCompare ">=v1.23.0-0" .Capabilities.KubeVersion.Version }} +apiVersion: autoscaling/v2 + {{- else }} +apiVersion: autoscaling/v2beta2 + {{- end }} +kind: HorizontalPodAutoscaler +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "xray.fullname" . }}-ipa +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "xray.fullname" . 
}}-ipa + minReplicas: {{ .Values.autoscalingIpa.minReplicas }} + maxReplicas: {{ .Values.autoscalingIpa.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscalingIpa.targetCPUUtilizationPercentage }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa-server.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa-server.yaml new file mode 100644 index 000000000..147d1db79 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa-server.yaml @@ -0,0 +1,31 @@ +{{- if and (not .Values.splitXraytoSeparateDeployments.gradualUpgrade) .Values.splitXraytoSeparateDeployments.enabled }} +{{- if and (.Values.autoscalingServer.enabled) (eq .Values.autoscalingServer.keda.enabled false) }} + {{- if semverCompare ">=v1.23.0-0" .Capabilities.KubeVersion.Version }} +apiVersion: autoscaling/v2 + {{- else }} +apiVersion: autoscaling/v2beta2 + {{- end }} +kind: HorizontalPodAutoscaler +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "xray.fullname" . }}-server +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "xray.fullname" . 
}}-server + minReplicas: {{ .Values.autoscalingServer.minReplicas }} + maxReplicas: {{ .Values.autoscalingServer.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscalingServer.targetCPUUtilizationPercentage }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa.yaml new file mode 100644 index 000000000..196b42751 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-hpa.yaml @@ -0,0 +1,37 @@ +{{- if or (and .Values.splitXraytoSeparateDeployments.gradualUpgrade .Values.splitXraytoSeparateDeployments.enabled) (not .Values.splitXraytoSeparateDeployments.enabled) }} +{{- if and (.Values.autoscaling.enabled) (eq .Values.autoscaling.keda.enabled false) }} + {{- if semverCompare ">=v1.23.0-0" .Capabilities.KubeVersion.Version }} +apiVersion: autoscaling/v2 + {{- else }} +apiVersion: autoscaling/v2beta2 + {{- end }} +kind: HorizontalPodAutoscaler +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "xray.fullname" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: {{ template "xray.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-ipa-deployment.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-ipa-deployment.yaml new file mode 100644 index 000000000..3e8403dbe --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-ipa-deployment.yaml @@ -0,0 +1,1411 @@ +{{- if .Values.splitXraytoSeparateDeployments.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "xray.fullname" . }}-ipa + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + component: {{ .Values.xray.name }} + servicename: ipa + {{- with .Values.xray.labels }} +{{ toYaml . | indent 4 }} + {{- end }} +{{- if .Release.IsUpgrade }} + unifiedUpgradeAllowed: {{ required "\n\n**************************************\nSTOP! 
UPGRADE from Xray 2.x (appVersion) currently not supported!\nIf this is an upgrade over an existing Xray 3.x, explicitly pass 'unifiedUpgradeAllowed=true' to upgrade.\n**************************************\n" .Values.unifiedUpgradeAllowed | quote }} +{{- end }} +{{- if and .Release.IsUpgrade .Values.postgresql.enabled }} + databaseUpgradeReady: {{ required "\n\n*********\nIMPORTANT: UPGRADE STOPPED to prevent data loss!\nReview CHANGELOG.md (https://github.com/jfrog/charts/blob/master/stable/xray/CHANGELOG.md), pass postgresql.image.tag '9.6.18-debian-10-r7' or '10.13.0-debian-10-r38' or '12.5.0-debian-10-r25' or 13.10.0-debian-11-r14 or 15.2.0-debian-11-r23 and databaseUpgradeReady=true if you are upgrading from chart version which has postgresql version 9.6.x or 10.13.x or 12.5.x or 13.x or 15.x" .Values.databaseUpgradeReady | quote }} +{{- end }} +{{- with .Values.server.statefulset.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if not .Values.autoscalingIpa.enabled }} + replicas: {{ .Values.replicaCount }} +{{- end }} +{{- with .Values.deployment.strategy }} + strategy: +{{ toYaml . | indent 4 }} +{{- end }} + selector: + matchLabels: + app: {{ template "xray.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.xray.name }} + servicename: ipa + template: + metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + release: {{ .Release.Name }} + component: {{ .Values.xray.name }} + servicename: ipa + {{- with .Values.xray.labels }} +{{ toYaml . | indent 8 }} + {{- end }} + annotations: + {{- if not .Values.xray.unifiedSecretInstallation }} + checksum/database-secrets: {{ include (print $.Template.BasePath "/xray-database-secrets.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/xray-system-yaml.yaml") . | sha256sum }} + {{- else }} + checksum/xray-unified-secret: {{ include (print $.Template.BasePath "/xray-unified-secret.yaml") . 
| sha256sum }} + {{- end }} + {{- with .Values.analysis.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.indexer.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.persist.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.server.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.router.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.filebeat.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.xray.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.policyenforcer.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + spec: + {{- if .Values.xray.schedulerName }} + schedulerName: {{ .Values.xray.schedulerName | quote }} + {{- end }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} +{{- include "xray.imagePullSecrets" . | indent 6 }} + {{- end }} + {{- if .Values.xray.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.xray.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.xray.priorityClass.create }} + priorityClassName: {{ default (include "xray.fullname" .) .Values.xray.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "xray.serviceAccountName" . }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.common.topologySpreadConstraints }} + topologySpreadConstraints: +{{ tpl (toYaml .Values.common.topologySpreadConstraints) . | indent 8 }} + {{- end }} + initContainers: + {{- if or .Values.common.customInitContainersBegin .Values.global.customInitContainersBegin }} +{{ tpl (include "xray.customInitContainersBegin" .) . | indent 6 }} + {{- end }} + - name: 'copy-system-yaml' + image: {{ include "xray.getImageInfoByValue" (list . 
"initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'bash' + - '-c' + - > + if [[ -e "{{ .Values.xray.persistence.mountPath }}/etc/filebeat.yaml" ]]; then chmod 644 {{ .Values.xray.persistence.mountPath }}/etc/filebeat.yaml; fi; + echo "Copy system.yaml to {{ .Values.xray.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc; + {{- if .Values.systemYamlOverride.existingSecret }} + cp -fv /tmp/etc/{{ .Values.systemYamlOverride.dataKey }} {{ .Values.xray.persistence.mountPath }}/etc/system.yaml; + {{- else }} + cp -fv /tmp/etc/system.yaml {{ .Values.xray.persistence.mountPath }}/etc/system.yaml; + {{- end }} + echo "Remove {{ .Values.xray.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.xray.persistence.mountPath }}/lost+found; + {{- if or .Values.xray.joinKey .Values.xray.joinKeySecretName .Values.global.joinKey .Values.global.joinKeySecretName }} + echo "Copy joinKey to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_JOIN_KEY} > {{ .Values.xray.persistence.mountPath }}/etc/security/join.key; + {{- end }} + {{- if or .Values.xray.masterKey .Values.xray.masterKeySecretName .Values.global.masterKey .Values.global.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_MASTER_KEY} > {{ .Values.xray.persistence.mountPath }}/etc/security/master.key; + {{- end }} + if set | grep -q "^XRAY_RABBITMQ_PASSWORD="; then + echo "Copy rabbitmq password to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p 
{{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_RABBITMQ_PASSWORD} > {{ .Values.xray.persistence.mountPath }}/etc/security/rabbitmq.password; + else + if test -f "{{ .Values.xray.persistence.mountPath }}/etc/security/rabbitmq.password"; then + echo "XRAY_RABBITMQ_PASSWORD is not set, removing existing rabbitmq.password file."; + rm -f {{ .Values.xray.persistence.mountPath }}/etc/security/rabbitmq.password; + fi + fi + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + if set | grep -q "^XRAY_POSTGRES_PASSWORD="; then + echo "Copy postgres password to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_POSTGRES_PASSWORD} > {{ .Values.xray.persistence.mountPath }}/etc/security/postgres.password; + else + if test -f "{{ .Values.xray.persistence.mountPath }}/etc/security/postgres.password"; then + echo "XRAY_POSTGRES_PASSWORD is not set, removing existing postgres.password file."; + rm -f {{ .Values.xray.persistence.mountPath }}/etc/security/postgres.password; + fi + fi + {{- end }} + env: + {{- if or .Values.xray.joinKey .Values.xray.joinKeySecretName .Values.global.joinKey .Values.global.joinKeySecretName }} + - name: XRAY_JOIN_KEY + valueFrom: + secretKeyRef: + {{- if or (not .Values.xray.unifiedSecretInstallation) (or .Values.xray.joinKeySecretName .Values.global.joinKeySecretName) }} + name: {{ include "xray.joinKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: join-key + {{- end }} + {{- if or .Values.xray.masterKey .Values.xray.masterKeySecretName .Values.global.masterKey .Values.global.masterKeySecretName }} + - name: XRAY_MASTER_KEY + valueFrom: + secretKeyRef: + {{- if or (not .Values.xray.unifiedSecretInstallation) (or .Values.xray.masterKeySecretName .Values.global.masterKeySecretName) }} + name: {{ include "xray.masterKeySecretName" . 
}} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: master-key + {{- end }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: XRAY_RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.password.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.password.key . }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: XRAY_RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "rabbitmq.passwordSecretName" .}} + key: rabbitmq-password + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: XRAY_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: password + {{- end }} + {{- end }} + volumeMounts: + - name: data-volume + mountPath: {{ .Values.xray.persistence.mountPath | quote }} + {{- if or (not .Values.xray.unifiedSecretInstallation) .Values.systemYamlOverride.existingSecret }} + - name: systemyaml + {{- else }} + - name: {{ include "xray.unifiedCustomSecretVolumeName" . 
}} + {{- end }} + {{- if .Values.systemYamlOverride.existingSecret }} + mountPath: "/tmp/etc/{{.Values.systemYamlOverride.dataKey}}" + subPath: {{ .Values.systemYamlOverride.dataKey }} + {{- else }} + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- end }} + {{- if or .Values.xray.customCertificates.enabled .Values.global.customCertificates.enabled .Values.rabbitmq.auth.tls.enabled .Values.global.rabbitmq.auth.tls.enabled }} + - name: copy-custom-certificates + image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'bash' + - '-c' + - > +{{ include "xray.copyCustomCerts" . | indent 10 }} +{{ include "xray.copyRabbitmqCustomCerts" . | indent 10 }} + volumeMounts: + - name: data-volume + mountPath: {{ .Values.xray.persistence.mountPath }} + {{- if or .Values.xray.customCertificates.enabled .Values.global.customCertificates.enabled }} + - name: ca-certs + mountPath: "/tmp/certs" + {{- end }} + {{- if or .Values.global.rabbitmq.auth.tls.enabled .Values.rabbitmq.auth.tls.enabled }} + - name: rabbitmq-ca-certs + mountPath: "/tmp/rabbitmqcerts" + {{- end }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if .Values.postgresql.enabled }} + - name: "wait-for-db" + image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . 
| nindent 10 }} + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'bash' + - '-c' + - | + echo "Waiting for postgresql to come up" + ready=false; + while ! $ready; do echo waiting; + timeout 2s bash -c " + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/router/app/bin/entrypoint-router.sh; + {{- with .Values.router.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + - name: JF_ROUTER_TOPOLOGY_LOCAL_REQUIREDSERVICETYPES + value: {{ include "xray.router.ipa.requiredServiceTypes" . }} + {{- if .Values.router.extraEnvVars }} + {{- tpl .Values.router.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - name: http-router + containerPort: {{ .Values.router.internalPort }} + volumeMounts: + - name: data-volume + mountPath: {{ .Values.router.persistence.mountPath | quote }} +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.router.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.router.resources | indent 10 }} +{{- if .Values.router.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.router.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.router.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.router.livenessProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.router.readinessProbe.enabled }} + readinessProbe: +{{ tpl .Values.router.readinessProbe.config . | indent 10 }} +{{- end }} + - name: {{ .Values.observability.name }} + image: {{ include "xray.getImageInfoByValue" (list . "observability") }} + imagePullPolicy: {{ .Values.observability.image.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . 
| nindent 10 }} + {{- end }} + command: + - '/bin/sh' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/observability/app/bin/entrypoint-observability.sh; + {{- with .Values.observability.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{- if .Values.observability.extraEnvVars }} + {{- tpl .Values.observability.extraEnvVars . | nindent 8 }} + {{- end }} + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.observability.persistence.mountPath }}" + resources: +{{ toYaml .Values.observability.resources | indent 10 }} + {{- if .Values.observability.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.observability.startupProbe.config . | indent 10 }} + {{- end }} + {{- if .Values.observability.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.observability.livenessProbe.config . | indent 10 }} + {{- end }} + - name: {{ .Values.analysis.name }} + image: {{ include "xray.getImageInfoByValue" (list . "analysis") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.analysis.preStartCommand }} + echo "Running custom Analysis preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.analysis.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + - name: JF_SKIPENTLICCHECKFORCLOUD + value: "true" + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . 
| indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . 
}} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_HA_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: XRAY_K8S_ENV + value: "true" + - name: EXECUTION_JOB_AES_KEY + valueFrom: + secretKeyRef: + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else if and .Values.xray.unifiedSecretInstallation (or .Values.xray.executionServiceAesKeySecretName .Values.global.executionServiceAesKeySecretName) }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: execution-service-aes-key + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.analysis.extraEnvVars }} + {{- tpl .Values.analysis.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.analysis.internalPort }} + name: http-analysis + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.analysis.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.analysis.resources | indent 10 }} +{{- if .Values.analysis.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.analysis.startupProbe.config . 
| indent 10 }} +{{- end }} +{{- if .Values.analysis.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.analysis.livenessProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.sbom.enabled }} + - name: {{ .Values.sbom.name }} + image: {{ include "xray.getImageInfoByValue" (list . "sbom") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.sbom.preStartCommand }} + echo "Running custom Sbom preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.sbom.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + - name: JF_SKIPENTLICCHECKFORCLOUD + value: "true" + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . 
}} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_HA_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: XRAY_K8S_ENV + value: "true" + - name: EXECUTION_JOB_AES_KEY + valueFrom: + secretKeyRef: + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.executionServiceAesKeySecretName" . 
}} + {{- else if and .Values.xray.unifiedSecretInstallation (or .Values.xray.executionServiceAesKeySecretName .Values.global.executionServiceAesKeySecretName) }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: execution-service-aes-key + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.sbom.extraEnvVars }} + {{- tpl .Values.sbom.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.sbom.internalPort }} + name: http-sbom + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.sbom.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sbom.resources | indent 10 }} +{{- if .Values.sbom.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.sbom.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.sbom.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.sbom.livenessProbe.config . | indent 10 }} +{{- end }} +{{- end }} +{{- if .Values.panoramic.enabled }} + - name: {{ .Values.panoramic.name }} + image: {{ include "xray.getImageInfoByValue" (list . "panoramic") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.panoramic.preStartCommand }} + echo "Running custom panoramic preStartCommand command"; + {{ tpl . 
$ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.panoramic.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + - name: JF_SKIPENTLICCHECKFORCLOUD + value: "true" + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . 
}}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_HA_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: XRAY_K8S_ENV + value: "true" + - name: EXECUTION_JOB_AES_KEY + valueFrom: + secretKeyRef: + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else if and .Values.xray.unifiedSecretInstallation (or .Values.xray.executionServiceAesKeySecretName .Values.global.executionServiceAesKeySecretName) }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: execution-service-aes-key + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.panoramic.extraEnvVars }} + {{- tpl .Values.panoramic.extraEnvVars . 
| nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.panoramic.internalPort }} + name: http-panoramic + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.panoramic.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.panoramic.resources | indent 10 }} +{{- if .Values.panoramic.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.panoramic.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.panoramic.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.panoramic.livenessProbe.config . | indent 10 }} +{{- end }} +{{- end }} + - name: {{ .Values.policyenforcer.name }} + image: {{ include "xray.getImageInfoByValue" (list . "policyenforcer") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.policyenforcer.preStartCommand }} + echo "Running custom policyenforcer preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.policyenforcer.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + - name: JF_SKIPENTLICCHECKFORCLOUD + value: "true" + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . 
| indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . 
}} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_HA_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: XRAY_K8S_ENV + value: "true" + - name: EXECUTION_JOB_AES_KEY + valueFrom: + secretKeyRef: + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else if and .Values.xray.unifiedSecretInstallation (or .Values.xray.executionServiceAesKeySecretName .Values.global.executionServiceAesKeySecretName) }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: execution-service-aes-key + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.policyenforcer.extraEnvVars }} + {{- tpl .Values.policyenforcer.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.policyenforcer.internalPort }} + name: http-polenf + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.policyenforcer.customVolumeMounts }} +{{ tpl . 
$ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.policyenforcer.resources | indent 10 }} +{{- if .Values.policyenforcer.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.policyenforcer.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.policyenforcer.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.policyenforcer.livenessProbe.config . | indent 10 }} +{{- end }} + - name: {{ .Values.indexer.name }} + image: {{ include "xray.getImageInfoByValue" (list . "indexer") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.indexer.preStartCommand }} + echo "Running custom Indexer preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.indexer.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + - name: JF_SKIPENTLICCHECKFORCLOUD + value: "true" + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . 
}} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . 
}}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_HA_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: XRAY_K8S_ENV + value: "true" + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.indexer.extraEnvVars }} + {{- tpl .Values.indexer.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.indexer.internalPort }} + name: http-indexer + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.indexer.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.indexer.resources | indent 10 }} +{{- if .Values.indexer.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.indexer.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.indexer.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.indexer.livenessProbe.config . | indent 10 }} +{{- end }} + - name: {{ .Values.persist.name }} + image: {{ include "xray.getImageInfoByValue" (list . "persist") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . 
$ }}; + {{- end }} + {{- with .Values.persist.preStartCommand }} + echo "Running custom Persist preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.persist.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + - name: JF_SKIPENTLICCHECKFORCLOUD + value: "true" + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . 
}} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_K8S_ENV + value: "true" + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.persist.extraEnvVars }} + {{- tpl .Values.persist.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.persist.internalPort }} + name: http-persist + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.persist.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.persist.resources | indent 10 }} +{{- if .Values.persist.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.persist.startupProbe.config . 
| indent 10 }} +{{- end }} +{{- if .Values.persist.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.persist.livenessProbe.config . | indent 10 }} +{{- end }} + {{- $mountPath := .Values.xray.persistence.mountPath }} + {{- range .Values.xray.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "xray.getImageInfoByValue" (list $ "initContainers") }} + imagePullPolicy: {{ $.Values.initContainers.image.pullPolicy }} + {{- if $.Values.containerSecurityContext.enabled }} + securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 10 }} + {{- end }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . }}' + volumeMounts: + - name: data-volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.xray.loggersResources | indent 10 }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . 
| nindent 10 }} + {{- end }} + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} +{{- if or .Values.common.customSidecarContainers .Values.global.customSidecarContainers }} +{{ tpl (include "xray.customSidecarContainers" .) . | indent 6 }} +{{- end }} + {{- if or .Values.xray.nodeSelector .Values.global.nodeSelector }} +{{ tpl (include "xray.nodeSelector" .) . | indent 6 }} + {{- end }} + {{- if .Values.ipa.affinity }} + {{- with .Values.ipa.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.ipa.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.ipa.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "xray.name" . }} + release: {{ .Release.Name }} + servicename: ipa + {{- else if eq .Values.ipa.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.ipa.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "xray.name" . }} + release: {{ .Release.Name }} + servicename: ipa + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} + volumes: + ########## External secrets ########### + {{- if or .Values.xray.customCertificates.enabled .Values.global.customCertificates.enabled }} + - name: ca-certs + secret: + secretName: {{ default .Values.global.customCertificates.certificateSecretName .Values.xray.customCertificates.certificateSecretName }} + {{- end }} + {{- if .Values.systemYamlOverride.existingSecret }} + - name: systemyaml + secret: + secretName: {{ .Values.systemYamlOverride.existingSecret }} + {{- end }} + ############ Config map, Volumes and Custom Volumes ############## + {{- if .Values.xray.loggers }} + - name: tail-logger-script + configMap: + name: {{ template "xray.fullname" . }}-logger + {{- end }} + - name: data-volume + emptyDir: + sizeLimit: {{ .Values.common.persistence.size }} + {{- if and .Values.xray.unifiedSecretInstallation (eq (include "xray.checkDuplicateUnifiedCustomVolume" .) "false" ) }} + ######### unifiedSecretInstallation ########### + - name: {{ include "xray.unifiedCustomSecretVolumeName" . }} + secret: + secretName: {{ template "xray.name" . }}-unified-secret + {{- else if not .Values.xray.unifiedSecretInstallation }} + ######### Non unifiedSecretInstallation ########### + {{- if not .Values.systemYamlOverride.existingSecret }} + - name: systemyaml + secret: + secretName: {{ printf "%s-%s" (include "xray.fullname" .) "system-yaml" }} + {{- end }} + {{- end }} + {{- if or .Values.global.rabbitmq.auth.tls.enabled .Values.rabbitmq.auth.tls.enabled }} + - name: rabbitmq-ca-certs + secret: + secretName: {{ template "xray.rabbitmqCustomCertificateshandler" . }} + {{- end }} + +{{- if or .Values.common.customVolumes .Values.global.customVolumes }} +{{ tpl (include "xray.customVolumes" .) . | indent 6 }} +{{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "xray.fullname" . 
}}-filebeat-config + {{- end }} + {{- if .Values.common.configMaps }} + - name: xray-configmaps + configMap: + name: {{ template "xray.fullname" . }}-configmaps + {{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-ipa-svc.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-ipa-svc.yaml new file mode 100644 index 000000000..dfd1ac8f0 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-ipa-svc.yaml @@ -0,0 +1,60 @@ +{{- if .Values.splitXraytoSeparateDeployments.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: xray-ipa-headless + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + component: {{ .Values.xray.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + servicename: ipa + {{- with .Values.xray.labels }} +{{ toYaml . | indent 4 }} + {{- end }} +spec: + clusterIP: None + type: ClusterIP + {{- if .additionalSpec }} +{{ tpl .additionalSpec $ | indent 2 }} + {{- end }} + ports: + - name: http-analysis + port: {{ .Values.analysis.externalPort }} + protocol: TCP + targetPort: {{ .Values.analysis.internalPort }} + - name: http-indexer + port: {{ .Values.indexer.externalPort }} + protocol: TCP + targetPort: {{ .Values.indexer.internalPort }} + - name: http-persist + port: {{ .Values.persist.externalPort }} + protocol: TCP + targetPort: {{ .Values.persist.internalPort }} + - name: http-router + port: {{ .Values.router.externalPort }} + protocol: TCP + targetPort: {{ .Values.router.internalPort }} +{{- if .Values.sbom.enabled }} + - name: http-sbom + port: {{ .Values.sbom.externalPort }} + protocol: TCP + targetPort: {{ .Values.sbom.internalPort }} +{{- end }} +{{- if .Values.panoramic.enabled }} + - name: http-panoramic + port: {{ .Values.panoramic.externalPort }} + protocol: TCP + targetPort: {{ .Values.panoramic.internalPort }} +{{- end }} + - name: http-polenf + port: {{ 
.Values.policyenforcer.externalPort }} + protocol: TCP + targetPort: {{ .Values.policyenforcer.internalPort }} + selector: + app: {{ template "xray.name" . }} + component: {{ .Values.xray.name }} + release: {{ .Release.Name }} + servicename: ipa +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa-ipa.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa-ipa.yaml new file mode 100644 index 000000000..36a1128e3 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa-ipa.yaml @@ -0,0 +1,47 @@ +{{- if and (not .Values.splitXraytoSeparateDeployments.gradualUpgrade) .Values.splitXraytoSeparateDeployments.enabled }} +{{- if and (.Values.autoscalingIpa.enabled) (eq .Values.autoscalingIpa.keda.enabled true) }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "xray.fullname" . }}-ipa + {{- if or .Values.global.autoscaling.keda.annotations .Values.autoscalingIpa.keda.annotations }} + annotations: + {{- with .Values.global.autoscaling.keda.annotations }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.autoscalingIpa.keda.annotations }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- end }} +spec: + scaleTargetRef: + kind: Deployment + name: {{ template "xray.fullname" . 
}}-ipa + minReplicaCount: {{ .Values.autoscalingIpa.minReplicas }} + maxReplicaCount: {{ .Values.autoscalingIpa.maxReplicas }} + pollingInterval: {{ .Values.autoscalingIpa.keda.pollingInterval }} + cooldownPeriod: {{ .Values.autoscalingIpa.keda.cooldownPeriod }} + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleUp: + {{- .Values.autoscalingIpa.keda.scaleUp | toYaml | nindent 10 }} + scaleDown: + {{- .Values.autoscalingIpa.keda.scaleDown | toYaml | nindent 10 }} + triggers: + {{- include "xray.autoscalingQueuesIpa" . | indent 4 }} + - type: cpu + metricType: Utilization + metadata: + value: "{{ .Values.autoscalingIpa.targetCPUUtilizationPercentage }}" + - type: memory + metricType: Utilization + metadata: + value: "{{ .Values.autoscalingIpa.targetMemoryUtilizationPercentage }}" +{{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa-server.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa-server.yaml new file mode 100644 index 000000000..98f7856f6 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa-server.yaml @@ -0,0 +1,47 @@ +{{- if and (not .Values.splitXraytoSeparateDeployments.gradualUpgrade) .Values.splitXraytoSeparateDeployments.enabled }} +{{- if and (.Values.autoscalingServer.enabled) (eq .Values.autoscalingServer.keda.enabled true) }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "xray.fullname" . }}-server + {{- if or .Values.global.autoscaling.keda.annotations .Values.autoscalingServer.keda.annotations }} + annotations: + {{- with .Values.global.autoscaling.keda.annotations }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.autoscalingServer.keda.annotations }} +{{ toYaml . 
| indent 4 }} + {{- end }} + {{- end }} +spec: + scaleTargetRef: + kind: Deployment + name: {{ template "xray.fullname" . }}-server + minReplicaCount: {{ .Values.autoscalingServer.minReplicas }} + maxReplicaCount: {{ .Values.autoscalingServer.maxReplicas }} + pollingInterval: {{ .Values.autoscalingServer.keda.pollingInterval }} + cooldownPeriod: {{ .Values.autoscalingServer.keda.cooldownPeriod }} + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleUp: + {{- .Values.autoscalingServer.keda.scaleUp | toYaml | nindent 10 }} + scaleDown: + {{- .Values.autoscalingServer.keda.scaleDown | toYaml | nindent 10 }} + triggers: + {{- include "xray.autoscalingQueuesServer" . | indent 4 }} + - type: cpu + metricType: Utilization + metadata: + value: "{{ .Values.autoscalingServer.targetCPUUtilizationPercentage }}" + - type: memory + metricType: Utilization + metadata: + value: "{{ .Values.autoscalingServer.targetMemoryUtilizationPercentage }}" +{{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa.yaml new file mode 100644 index 000000000..ad830e4ff --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-hpa.yaml @@ -0,0 +1,47 @@ +{{- if or (and .Values.splitXraytoSeparateDeployments.gradualUpgrade .Values.splitXraytoSeparateDeployments.enabled) (not .Values.splitXraytoSeparateDeployments.enabled) }} +{{- if and (.Values.autoscaling.enabled) (eq .Values.autoscaling.keda.enabled true) }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "xray.fullname" . 
}} + {{- if or .Values.global.autoscaling.keda.annotations .Values.autoscaling.keda.annotations }} + annotations: + {{- with .Values.global.autoscaling.keda.annotations }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.autoscaling.keda.annotations }} +{{ toYaml . | indent 4 }} + {{- end }} + {{- end }} +spec: + scaleTargetRef: + kind: StatefulSet + name: {{ template "xray.fullname" . }} + minReplicaCount: {{ .Values.autoscaling.minReplicas }} + maxReplicaCount: {{ .Values.autoscaling.maxReplicas }} + pollingInterval: {{ .Values.autoscaling.keda.pollingInterval }} + cooldownPeriod: {{ .Values.autoscaling.keda.cooldownPeriod }} + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleUp: + {{- .Values.autoscaling.keda.scaleUp | toYaml | nindent 10 }} + scaleDown: + {{- .Values.autoscaling.keda.scaleDown | toYaml | nindent 10 }} + triggers: + {{- include "xray.autoscalingQueues" . | indent 4 }} + - type: cpu + metricType: Utilization + metadata: + value: "{{ .Values.autoscaling.targetCPUUtilizationPercentage }}" + - type: memory + metricType: Utilization + metadata: + value: "{{ .Values.autoscaling.targetMemoryUtilizationPercentage }}" +{{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-secret.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-secret.yaml new file mode 100644 index 000000000..c9c0b9cb9 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-secret.yaml @@ -0,0 +1,13 @@ +{{- if or .Values.autoscaling.keda.enabled .Values.autoscalingServer.keda.enabled .Values.autoscalingIpa.keda.enabled }} +apiVersion: v1 +kind: Secret +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: keda-rabbitmq-secret +stringData: + host: {{ include "rabbitmq.urlWithCreds" . 
}} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-trigger-authentication.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-trigger-authentication.yaml new file mode 100644 index 000000000..aa50a6b6f --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-keda-trigger-authentication.yaml @@ -0,0 +1,16 @@ +{{- if or .Values.autoscaling.keda.enabled .Values.autoscalingServer.keda.enabled .Values.autoscalingIpa.keda.enabled }} +apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: keda-trigger-auth-rabbitmq-conn-xray +spec: + secretTargetRef: + - parameter: host + name: keda-rabbitmq-secret + key: host +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-networkpolicy.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-networkpolicy.yaml new file mode 100644 index 000000000..c278068c3 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-networkpolicy.yaml @@ -0,0 +1,33 @@ +{{- range .Values.networkpolicy }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "xray.fullname" $ }}-{{ .name }}-networkpolicy + labels: + app: {{ template "xray.name" $ }} + release: {{ $.Release.Name }} + component: {{ $.Values.xray.name }} +spec: +{{- if .podSelector }} + podSelector: +{{ .podSelector | toYaml | trimSuffix "\n" | indent 4 -}} +{{ else }} + podSelector: {} +{{- end }} + policyTypes: + {{- if .ingress }} + - Ingress + {{- end }} + {{- if .egress }} + - Egress + {{- end }} +{{- if .ingress }} + ingress: +{{ .ingress | toYaml | trimSuffix "\n" | indent 2 -}} +{{- end }} +{{- if .egress }} + egress: +{{ .egress | toYaml | 
trimSuffix "\n" | indent 2 -}} +{{- end }} +--- +{{- end -}} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-pdb.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-pdb.yaml new file mode 100644 index 000000000..0a15a517d --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.xray.minAvailable -}} +{{- if semverCompare " + if [[ -e "{{ .Values.xray.persistence.mountPath }}/etc/filebeat.yaml" ]]; then chmod 644 {{ .Values.xray.persistence.mountPath }}/etc/filebeat.yaml; fi; + echo "Copy system.yaml to {{ .Values.xray.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc; + {{- if .Values.systemYamlOverride.existingSecret }} + cp -fv /tmp/etc/{{ .Values.systemYamlOverride.dataKey }} {{ .Values.xray.persistence.mountPath }}/etc/system.yaml; + {{- else }} + cp -fv /tmp/etc/system.yaml {{ .Values.xray.persistence.mountPath }}/etc/system.yaml; + {{- end }} + echo "Remove {{ .Values.xray.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.xray.persistence.mountPath }}/lost+found; + {{- if or .Values.xray.joinKey .Values.xray.joinKeySecretName .Values.global.joinKey .Values.global.joinKeySecretName }} + echo "Copy joinKey to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_JOIN_KEY} > {{ .Values.xray.persistence.mountPath }}/etc/security/join.key; + {{- end }} + {{- if or .Values.xray.masterKey .Values.xray.masterKeySecretName .Values.global.masterKey .Values.global.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_MASTER_KEY} > {{ .Values.xray.persistence.mountPath }}/etc/security/master.key; + {{- end }} + if set | grep -q 
"^XRAY_RABBITMQ_PASSWORD="; then + echo "Copy rabbitmq password to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_RABBITMQ_PASSWORD} > {{ .Values.xray.persistence.mountPath }}/etc/security/rabbitmq.password; + else + if test -f "{{ .Values.xray.persistence.mountPath }}/etc/security/rabbitmq.password"; then + echo "XRAY_RABBITMQ_PASSWORD is not set, removing existing rabbitmq.password file."; + rm -f {{ .Values.xray.persistence.mountPath }}/etc/security/rabbitmq.password; + fi + fi + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + if set | grep -q "^XRAY_POSTGRES_PASSWORD="; then + echo "Copy postgres password to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_POSTGRES_PASSWORD} > {{ .Values.xray.persistence.mountPath }}/etc/security/postgres.password; + else + if test -f "{{ .Values.xray.persistence.mountPath }}/etc/security/postgres.password"; then + echo "XRAY_POSTGRES_PASSWORD is not set, removing existing postgres.password file."; + rm -f {{ .Values.xray.persistence.mountPath }}/etc/security/postgres.password; + fi + fi + {{- end }} + env: + {{- if or .Values.xray.joinKey .Values.xray.joinKeySecretName .Values.global.joinKey .Values.global.joinKeySecretName }} + - name: XRAY_JOIN_KEY + valueFrom: + secretKeyRef: + {{- if or (not .Values.xray.unifiedSecretInstallation) (or .Values.xray.joinKeySecretName .Values.global.joinKeySecretName) }} + name: {{ include "xray.joinKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . 
}}-unified-secret" + {{- end }} + key: join-key + {{- end }} + {{- if or .Values.xray.masterKey .Values.xray.masterKeySecretName .Values.global.masterKey .Values.global.masterKeySecretName }} + - name: XRAY_MASTER_KEY + valueFrom: + secretKeyRef: + {{- if or (not .Values.xray.unifiedSecretInstallation) (or .Values.xray.masterKeySecretName .Values.global.masterKeySecretName) }} + name: {{ include "xray.masterKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: master-key + {{- end }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: XRAY_RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.password.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.password.key . }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: XRAY_RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "rabbitmq.passwordSecretName" .}} + key: rabbitmq-password + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: XRAY_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . 
}}-unified-secret" + {{- end }} + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: password + {{- end }} + {{- end }} + volumeMounts: + - name: data-volume + mountPath: {{ .Values.xray.persistence.mountPath | quote }} + {{- if or (not .Values.xray.unifiedSecretInstallation) .Values.systemYamlOverride.existingSecret }} + - name: systemyaml + {{- else }} + - name: {{ include "xray.unifiedCustomSecretVolumeName" . }} + {{- end }} + {{- if .Values.systemYamlOverride.existingSecret }} + mountPath: "/tmp/etc/{{.Values.systemYamlOverride.dataKey}}" + subPath: {{ .Values.systemYamlOverride.dataKey }} + {{- else }} + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- end }} + {{- if or .Values.xray.customCertificates.enabled .Values.global.customCertificates.enabled .Values.rabbitmq.auth.tls.enabled .Values.global.rabbitmq.auth.tls.enabled }} + - name: copy-custom-certificates + image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'bash' + - '-c' + - > +{{ include "xray.copyCustomCerts" . | indent 10 }} +{{ include "xray.copyRabbitmqCustomCerts" . 
| indent 10 }} + volumeMounts: + - name: data-volume + mountPath: {{ .Values.xray.persistence.mountPath }} + {{- if or .Values.xray.customCertificates.enabled .Values.global.customCertificates.enabled }} + - name: ca-certs + mountPath: "/tmp/certs" + {{- end }} + {{- if or .Values.global.rabbitmq.auth.tls.enabled .Values.rabbitmq.auth.tls.enabled }} + - name: rabbitmq-ca-certs + mountPath: "/tmp/rabbitmqcerts" + {{- end }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if .Values.postgresql.enabled }} + - name: "wait-for-db" + image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'bash' + - '-c' + - | + echo "Waiting for postgresql to come up" + ready=false; + while ! $ready; do echo waiting; + timeout 2s bash -c " + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/router/app/bin/entrypoint-router.sh; + {{- with .Values.router.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + - name: JF_ROUTER_TOPOLOGY_LOCAL_REQUIREDSERVICETYPES + value: {{ include "xray.router.server.requiredServiceTypes" . }} + {{- if .Values.router.extraEnvVars }} + {{- tpl .Values.router.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - name: http-router + containerPort: {{ .Values.router.internalPort }} + volumeMounts: + - name: data-volume + mountPath: {{ .Values.router.persistence.mountPath | quote }} +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.router.customVolumeMounts }} +{{ tpl . 
$ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.router.resources | indent 10 }} +{{- if .Values.router.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.router.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.router.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.router.livenessProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.router.readinessProbe.enabled }} + readinessProbe: +{{ tpl .Values.router.readinessProbe.config . | indent 10 }} +{{- end }} + - name: {{ .Values.observability.name }} + image: {{ include "xray.getImageInfoByValue" (list . "observability") }} + imagePullPolicy: {{ .Values.observability.image.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/sh' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/observability/app/bin/entrypoint-observability.sh; + {{- with .Values.observability.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{- if .Values.observability.extraEnvVars }} + {{- tpl .Values.observability.extraEnvVars . | nindent 8 }} + {{- end }} + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.observability.persistence.mountPath }}" + resources: +{{ toYaml .Values.observability.resources | indent 10 }} + {{- if .Values.observability.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.observability.startupProbe.config . | indent 10 }} + {{- end }} + {{- if .Values.observability.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.observability.livenessProbe.config . | indent 10 }} + {{- end }} + - name: {{ .Values.server.name }} + image: {{ include "xray.getImageInfoByValue" (list . 
"server") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.server.preStartCommand }} + echo "Running custom Server preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.server.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . 
}}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_K8S_ENV + value: "true" + - name: EXECUTION_JOB_AES_KEY + valueFrom: + secretKeyRef: + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else if and .Values.xray.unifiedSecretInstallation (or .Values.xray.executionServiceAesKeySecretName .Values.global.executionServiceAesKeySecretName) }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . 
}}-unified-secret" + {{- end }} + key: execution-service-aes-key + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.server.extraEnvVars }} + {{- tpl .Values.server.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.server.internalPort }} + name: http-server + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.server.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.server.resources | indent 10 }} +{{- if .Values.server.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.server.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.server.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.server.livenessProbe.config . | indent 10 }} +{{- end }} + {{- $mountPath := .Values.xray.persistence.mountPath }} + {{- range .Values.xray.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "xray.getImageInfoByValue" (list $ "initContainers") }} + imagePullPolicy: {{ $.Values.initContainers.image.pullPolicy }} + {{- if $.Values.containerSecurityContext.enabled }} + securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 10 }} + {{- end }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . 
}}' + volumeMounts: + - name: data-volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.xray.loggersResources | indent 10 }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} +{{- if or .Values.common.customSidecarContainers .Values.global.customSidecarContainers }} +{{ tpl (include "xray.customSidecarContainers" .) . | indent 6 }} +{{- end }} + {{- if or .Values.xray.nodeSelector .Values.global.nodeSelector }} +{{ tpl (include "xray.nodeSelector" .) . | indent 6 }} + {{- end }} + {{- if .Values.affinity }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.xray.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.xray.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "xray.name" . 
}} + release: {{ .Release.Name }} + {{- else if eq .Values.xray.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.xray.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "xray.name" . }} + release: {{ .Release.Name }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + ########## External secrets ########### + {{- if or .Values.xray.customCertificates.enabled .Values.global.customCertificates.enabled }} + - name: ca-certs + secret: + secretName: {{ default .Values.global.customCertificates.certificateSecretName .Values.xray.customCertificates.certificateSecretName }} + {{- end }} + {{- if .Values.systemYamlOverride.existingSecret }} + - name: systemyaml + secret: + secretName: {{ .Values.systemYamlOverride.existingSecret }} + {{- end }} + ############ Config map, Volumes and Custom Volumes ############## + {{- if .Values.xray.loggers }} + - name: tail-logger-script + configMap: + name: {{ template "xray.fullname" . }}-logger + {{- end }} + - name: data-volume + emptyDir: + sizeLimit: {{ .Values.common.persistence.size }} + {{- if and .Values.xray.unifiedSecretInstallation (eq (include "xray.checkDuplicateUnifiedCustomVolume" .) "false" ) }} + ######### unifiedSecretInstallation ########### + - name: {{ include "xray.unifiedCustomSecretVolumeName" . }} + secret: + secretName: {{ template "xray.name" . }}-unified-secret + {{- else if not .Values.xray.unifiedSecretInstallation }} + ######### Non unifiedSecretInstallation ########### + {{- if not .Values.systemYamlOverride.existingSecret }} + - name: systemyaml + secret: + secretName: {{ printf "%s-%s" (include "xray.fullname" .) 
"system-yaml" }} + {{- end }} + {{- end }} + {{- if or .Values.global.rabbitmq.auth.tls.enabled .Values.rabbitmq.auth.tls.enabled }} + - name: rabbitmq-ca-certs + secret: + secretName: {{ template "xray.rabbitmqCustomCertificateshandler" . }} + {{- end }} + +{{- if or .Values.common.customVolumes .Values.global.customVolumes }} +{{ tpl (include "xray.customVolumes" .) . | indent 6 }} +{{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "xray.fullname" . }}-filebeat-config + {{- end }} + {{- if .Values.common.configMaps }} + - name: xray-configmaps + configMap: + name: {{ template "xray.fullname" . }}-configmaps + {{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-serviceaccount.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-serviceaccount.yaml new file mode 100644 index 000000000..70cf777a2 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "xray.serviceAccountName" . 
}} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-statefulset.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-statefulset.yaml new file mode 100644 index 000000000..e70f7aef7 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-statefulset.yaml @@ -0,0 +1,1539 @@ +{{- if or (and .Values.splitXraytoSeparateDeployments.gradualUpgrade .Values.splitXraytoSeparateDeployments.enabled) (not .Values.splitXraytoSeparateDeployments.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "xray.fullname" . }} + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + version: {{ include "xray.app.version" . }} + component: {{ .Values.xray.name }} + {{- with .Values.xray.labels }} +{{ toYaml . | indent 4 }} + {{- end }} +{{- if .Release.IsUpgrade }} + unifiedUpgradeAllowed: {{ required "\n\n**************************************\nSTOP! 
UPGRADE from Xray 2.x (appVersion) currently not supported!\nIf this is an upgrade over an existing Xray 3.x, explicitly pass 'unifiedUpgradeAllowed=true' to upgrade.\n**************************************\n" .Values.unifiedUpgradeAllowed | quote }} +{{- end }} +{{- if and .Release.IsUpgrade .Values.postgresql.enabled }} + databaseUpgradeReady: {{ required "\n\n*********\nIMPORTANT: UPGRADE STOPPED to prevent data loss!\nReview CHANGELOG.md (https://github.com/jfrog/charts/blob/master/stable/xray/CHANGELOG.md), pass postgresql.image.tag '9.6.18-debian-10-r7' or '10.13.0-debian-10-r38' or '12.5.0-debian-10-r25' or 13.10.0-debian-11-r14 or 15.2.0-debian-11-r23 and databaseUpgradeReady=true if you are upgrading from chart version which has postgresql version 9.6.x or 10.13.x or 12.5.x or 13.x or 15.x" .Values.databaseUpgradeReady | quote }} +{{- end }} +{{- with .Values.server.statefulset.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: "{{ template "xray.fullname" . }}" +{{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} +{{- end }} + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: {{ template "xray.name" . }} + release: {{ .Release.Name }} + component: {{ .Values.xray.name }} + template: + metadata: + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + release: {{ .Release.Name }} + component: {{ .Values.xray.name }} + {{- with .Values.xray.labels }} +{{ toYaml . | indent 8 }} + {{- end }} + annotations: + {{- if not .Values.xray.unifiedSecretInstallation }} + checksum/database-secrets: {{ include (print $.Template.BasePath "/xray-database-secrets.yaml") . | sha256sum }} + checksum/systemyaml: {{ include (print $.Template.BasePath "/xray-system-yaml.yaml") . | sha256sum }} + {{- else }} + checksum/xray-unified-secret: {{ include (print $.Template.BasePath "/xray-unified-secret.yaml") . 
| sha256sum }} + {{- end }} + {{- with .Values.analysis.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.indexer.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.persist.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.server.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.router.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.filebeat.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.xray.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.policyenforcer.annotations }} +{{ toYaml . | indent 8 }} + {{- end }} + spec: + {{- if .Values.xray.schedulerName }} + schedulerName: {{ .Values.xray.schedulerName | quote }} + {{- end }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} +{{- include "xray.imagePullSecrets" . | indent 6 }} + {{- end }} + {{- if .Values.xray.priorityClass.existingPriorityClass }} + priorityClassName: {{ .Values.xray.priorityClass.existingPriorityClass }} + {{- else -}} + {{- if .Values.xray.priorityClass.create }} + priorityClassName: {{ default (include "xray.fullname" .) .Values.xray.priorityClass.name }} + {{- end }} + {{- end }} + serviceAccountName: {{ template "xray.serviceAccountName" . }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.common.topologySpreadConstraints }} + topologySpreadConstraints: +{{ tpl (toYaml .Values.common.topologySpreadConstraints) . | indent 8 }} + {{- end }} + initContainers: + {{- if or .Values.common.customInitContainersBegin .Values.global.customInitContainersBegin }} +{{ tpl (include "xray.customInitContainersBegin" .) . | indent 6 }} + {{- end }} + - name: 'copy-system-yaml' + image: {{ include "xray.getImageInfoByValue" (list . 
"initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'bash' + - '-c' + - > + if [[ -e "{{ .Values.xray.persistence.mountPath }}/etc/filebeat.yaml" ]]; then chmod 644 {{ .Values.xray.persistence.mountPath }}/etc/filebeat.yaml; fi; + echo "Copy system.yaml to {{ .Values.xray.persistence.mountPath }}/etc"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc; + {{- if .Values.systemYamlOverride.existingSecret }} + cp -fv /tmp/etc/{{ .Values.systemYamlOverride.dataKey }} {{ .Values.xray.persistence.mountPath }}/etc/system.yaml; + {{- else }} + cp -fv /tmp/etc/system.yaml {{ .Values.xray.persistence.mountPath }}/etc/system.yaml; + {{- end }} + echo "Remove {{ .Values.xray.persistence.mountPath }}/lost+found folder if exists"; + rm -rfv {{ .Values.xray.persistence.mountPath }}/lost+found; + {{- if or .Values.xray.joinKey .Values.xray.joinKeySecretName .Values.global.joinKey .Values.global.joinKeySecretName }} + echo "Copy joinKey to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_JOIN_KEY} > {{ .Values.xray.persistence.mountPath }}/etc/security/join.key; + {{- end }} + {{- if or .Values.xray.masterKey .Values.xray.masterKeySecretName .Values.global.masterKey .Values.global.masterKeySecretName }} + echo "Copy masterKey to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_MASTER_KEY} > {{ .Values.xray.persistence.mountPath }}/etc/security/master.key; + {{- end }} + if set | grep -q "^XRAY_RABBITMQ_PASSWORD="; then + echo "Copy rabbitmq password to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p 
{{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_RABBITMQ_PASSWORD} > {{ .Values.xray.persistence.mountPath }}/etc/security/rabbitmq.password; + else + if test -f "{{ .Values.xray.persistence.mountPath }}/etc/security/rabbitmq.password"; then + echo "XRAY_RABBITMQ_PASSWORD is not set, removing existing rabbitmq.password file."; + rm -f {{ .Values.xray.persistence.mountPath }}/etc/security/rabbitmq.password; + fi + fi + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + if set | grep -q "^XRAY_POSTGRES_PASSWORD="; then + echo "Copy postgres password to {{ .Values.xray.persistence.mountPath }}/etc/security"; + mkdir -p {{ .Values.xray.persistence.mountPath }}/etc/security; + echo ${XRAY_POSTGRES_PASSWORD} > {{ .Values.xray.persistence.mountPath }}/etc/security/postgres.password; + else + if test -f "{{ .Values.xray.persistence.mountPath }}/etc/security/postgres.password"; then + echo "XRAY_POSTGRES_PASSWORD is not set, removing existing postgres.password file."; + rm -f {{ .Values.xray.persistence.mountPath }}/etc/security/postgres.password; + fi + fi + {{- end }} + env: + {{- if or .Values.xray.joinKey .Values.xray.joinKeySecretName .Values.global.joinKey .Values.global.joinKeySecretName }} + - name: XRAY_JOIN_KEY + valueFrom: + secretKeyRef: + {{- if or (not .Values.xray.unifiedSecretInstallation) (or .Values.xray.joinKeySecretName .Values.global.joinKeySecretName) }} + name: {{ include "xray.joinKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: join-key + {{- end }} + {{- if or .Values.xray.masterKey .Values.xray.masterKeySecretName .Values.global.masterKey .Values.global.masterKeySecretName }} + - name: XRAY_MASTER_KEY + valueFrom: + secretKeyRef: + {{- if or (not .Values.xray.unifiedSecretInstallation) (or .Values.xray.masterKeySecretName .Values.global.masterKeySecretName) }} + name: {{ include "xray.masterKeySecretName" . 
}} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: master-key + {{- end }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: XRAY_RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.password.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.password.key . }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: XRAY_RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "rabbitmq.passwordSecretName" .}} + key: rabbitmq-password + {{- end }} + {{ if or .Values.database.secrets.password .Values.database.password .Values.postgresql.enabled }} + - name: XRAY_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.password }} + name: {{ tpl .Values.database.secrets.password.name . }} + key: {{ tpl .Values.database.secrets.password.key . }} + {{- else if .Values.database.password }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-password + {{- else if .Values.postgresql.enabled }} + name: {{ .Release.Name }}-postgresql + key: password + {{- end }} + {{- end }} + volumeMounts: + - name: data-volume + mountPath: {{ .Values.xray.persistence.mountPath | quote }} + {{- if or (not .Values.xray.unifiedSecretInstallation) .Values.systemYamlOverride.existingSecret }} + - name: systemyaml + {{- else }} + - name: {{ include "xray.unifiedCustomSecretVolumeName" . 
}} + {{- end }} + {{- if .Values.systemYamlOverride.existingSecret }} + mountPath: "/tmp/etc/{{.Values.systemYamlOverride.dataKey}}" + subPath: {{ .Values.systemYamlOverride.dataKey }} + {{- else }} + mountPath: "/tmp/etc/system.yaml" + subPath: system.yaml + {{- end }} + {{- if or .Values.xray.customCertificates.enabled .Values.global.customCertificates.enabled .Values.rabbitmq.auth.tls.enabled .Values.global.rabbitmq.auth.tls.enabled }} + - name: copy-custom-certificates + image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'bash' + - '-c' + - > +{{ include "xray.copyCustomCerts" . | indent 10 }} +{{ include "xray.copyRabbitmqCustomCerts" . | indent 10 }} + volumeMounts: + - name: data-volume + mountPath: {{ .Values.xray.persistence.mountPath }} + {{- if or .Values.xray.customCertificates.enabled .Values.global.customCertificates.enabled }} + - name: ca-certs + mountPath: "/tmp/certs" + {{- end }} + {{- if or .Values.global.rabbitmq.auth.tls.enabled .Values.rabbitmq.auth.tls.enabled }} + - name: rabbitmq-ca-certs + mountPath: "/tmp/rabbitmqcerts" + {{- end }} + {{- end }} + {{- if .Values.waitForDatabase }} + {{- if .Values.postgresql.enabled }} + - name: "wait-for-db" + image: "{{ include "xray.getImageInfoByValue" (list . "initContainers") }}" + imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . 
| nindent 10 }} + {{- end }} + resources: +{{ toYaml .Values.initContainers.resources | indent 10 }} + command: + - 'bash' + - '-c' + - | + echo "Waiting for postgresql to come up" + ready=false; + while ! $ready; do echo waiting; + timeout 2s bash -c " + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/router/app/bin/entrypoint-router.sh; + {{- with .Values.router.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + - name: JF_ROUTER_TOPOLOGY_LOCAL_REQUIREDSERVICETYPES + value: {{ include "xray.router.requiredServiceTypes" . }} + {{- if .Values.router.extraEnvVars }} + {{- tpl .Values.router.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - name: http-router + containerPort: {{ .Values.router.internalPort }} + volumeMounts: + - name: data-volume + mountPath: {{ .Values.router.persistence.mountPath | quote }} +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.router.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.router.resources | indent 10 }} +{{- if .Values.router.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.router.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.router.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.router.livenessProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.router.readinessProbe.enabled }} + readinessProbe: +{{ tpl .Values.router.readinessProbe.config . | indent 10 }} +{{- end }} + - name: {{ .Values.observability.name }} + image: {{ include "xray.getImageInfoByValue" (list . "observability") }} + imagePullPolicy: {{ .Values.observability.image.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . 
| nindent 10 }} + {{- end }} + command: + - '/bin/sh' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/observability/app/bin/entrypoint-observability.sh; + {{- with .Values.observability.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{- if .Values.observability.extraEnvVars }} + {{- tpl .Values.observability.extraEnvVars . | nindent 8 }} + {{- end }} + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.observability.persistence.mountPath }}" + resources: +{{ toYaml .Values.observability.resources | indent 10 }} + {{- if .Values.observability.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.observability.startupProbe.config . | indent 10 }} + {{- end }} + {{- if .Values.observability.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.observability.livenessProbe.config . | indent 10 }} + {{- end }} + - name: {{ .Values.server.name }} + image: {{ include "xray.getImageInfoByValue" (list . "server") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.server.preStartCommand }} + echo "Running custom Server preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.server.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . 
| indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . 
}} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_K8S_ENV + value: "true" + - name: EXECUTION_JOB_AES_KEY + valueFrom: + secretKeyRef: + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else if and .Values.xray.unifiedSecretInstallation (or .Values.xray.executionServiceAesKeySecretName .Values.global.executionServiceAesKeySecretName) }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: execution-service-aes-key + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.server.extraEnvVars }} + {{- tpl .Values.server.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.server.internalPort }} + name: http-server + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.server.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.server.resources | indent 10 }} +{{- if .Values.server.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.server.startupProbe.config . 
| indent 10 }} +{{- end }} +{{- if .Values.server.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.server.livenessProbe.config . | indent 10 }} +{{- end }} + - name: {{ .Values.analysis.name }} + image: {{ include "xray.getImageInfoByValue" (list . "analysis") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.analysis.preStartCommand }} + echo "Running custom Analysis preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.analysis.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . 
}}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_HA_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: XRAY_K8S_ENV + value: "true" + - name: EXECUTION_JOB_AES_KEY + valueFrom: + secretKeyRef: + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.executionServiceAesKeySecretName" . 
}} + {{- else if and .Values.xray.unifiedSecretInstallation (or .Values.xray.executionServiceAesKeySecretName .Values.global.executionServiceAesKeySecretName) }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: execution-service-aes-key + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.analysis.extraEnvVars }} + {{- tpl .Values.analysis.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.analysis.internalPort }} + name: http-analysis + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.analysis.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.analysis.resources | indent 10 }} +{{- if .Values.analysis.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.analysis.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.analysis.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.analysis.livenessProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.sbom.enabled }} + - name: {{ .Values.sbom.name }} + image: {{ include "xray.getImageInfoByValue" (list . "sbom") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.sbom.preStartCommand }} + echo "Running custom Sbom preStartCommand command"; + {{ tpl . 
$ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.sbom.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . 
}}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_HA_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: XRAY_K8S_ENV + value: "true" + - name: EXECUTION_JOB_AES_KEY + valueFrom: + secretKeyRef: + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else if and .Values.xray.unifiedSecretInstallation (or .Values.xray.executionServiceAesKeySecretName .Values.global.executionServiceAesKeySecretName) }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: execution-service-aes-key + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.sbom.extraEnvVars }} + {{- tpl .Values.sbom.extraEnvVars . 
| nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.sbom.internalPort }} + name: http-sbom + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.sbom.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sbom.resources | indent 10 }} +{{- if .Values.sbom.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.sbom.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.sbom.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.sbom.livenessProbe.config . | indent 10 }} +{{- end }} +{{- end }} +{{- if .Values.panoramic.enabled }} + - name: {{ .Values.panoramic.name }} + image: {{ include "xray.getImageInfoByValue" (list . "panoramic") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.panoramic.preStartCommand }} + echo "Running custom panoramic preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.panoramic.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . 
}} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . 
}}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_HA_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: XRAY_K8S_ENV + value: "true" + - name: EXECUTION_JOB_AES_KEY + valueFrom: + secretKeyRef: + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else if and .Values.xray.unifiedSecretInstallation (or .Values.xray.executionServiceAesKeySecretName .Values.global.executionServiceAesKeySecretName) }} + name: {{ template "xray.executionServiceAesKeySecretName" . }} + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: execution-service-aes-key + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.panoramic.extraEnvVars }} + {{- tpl .Values.panoramic.extraEnvVars . | nindent 8 }} + {{- end }} + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.panoramic.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.panoramic.resources | indent 10 }} +{{- if .Values.panoramic.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.panoramic.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.panoramic.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.panoramic.livenessProbe.config . | indent 10 }} +{{- end }} +{{- end }} + - name: {{ .Values.policyenforcer.name }} + image: {{ include "xray.getImageInfoByValue" (list . 
"policyenforcer") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.policyenforcer.preStartCommand }} + echo "Running custom policyenforcer preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.policyenforcer.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . 
}}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_HA_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: XRAY_K8S_ENV + value: "true" + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.policyenforcer.extraEnvVars }} + {{- tpl .Values.policyenforcer.extraEnvVars . 
| nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.policyenforcer.internalPort }} + name: http-polenf + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.policyenforcer.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.policyenforcer.resources | indent 10 }} +{{- if .Values.policyenforcer.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.policyenforcer.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.policyenforcer.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.policyenforcer.livenessProbe.config . | indent 10 }} +{{- end }} + - name: {{ .Values.indexer.name }} + image: {{ include "xray.getImageInfoByValue" (list . "indexer") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + {{- with .Values.indexer.preStartCommand }} + echo "Running custom Indexer preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.indexer.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . 
}} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . 
}}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_HA_NODE_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: XRAY_K8S_ENV + value: "true" + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.indexer.extraEnvVars }} + {{- tpl .Values.indexer.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.indexer.internalPort }} + name: http-indexer + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.indexer.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.indexer.resources | indent 10 }} +{{- if .Values.indexer.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.indexer.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.indexer.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.indexer.livenessProbe.config . | indent 10 }} +{{- end }} + - name: {{ .Values.persist.name }} + image: {{ include "xray.getImageInfoByValue" (list . "persist") }} + imagePullPolicy: {{ .Values.imagePullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + command: + - '/bin/bash' + - '-c' + - > + {{- with .Values.common.preStartCommand }} + echo "Running custom common preStartCommand command"; + {{ tpl . 
$ }}; + {{- end }} + {{- with .Values.persist.preStartCommand }} + echo "Running custom Persist preStartCommand command"; + {{ tpl . $ }}; + {{- end }} + exec /opt/jfrog/xray/app/bin/wrapper.sh; + {{- with .Values.persist.lifecycle }} + lifecycle: +{{ toYaml . | indent 10 }} + {{- end }} + env: + {{ include "xray.envVariables" . | indent 8 }} + {{ include "xray.rabbitmqTlsEnvVariables" . | indent 8 }} + {{- if and .Values.rabbitmq.external.secrets (not .Values.common.rabbitmq.connectionConfigFromEnvironment) }} + - name: JF_SHARED_RABBITMQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.username.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.username.key . }} + - name: JF_SHARED_RABBITMQ_URL + valueFrom: + secretKeyRef: + name: {{ tpl .Values.rabbitmq.external.secrets.url.name . }} + key: {{ tpl .Values.rabbitmq.external.secrets.url.key . }} + {{- end }} + {{- if or .Values.database.secrets.user .Values.database.user }} + - name: JF_SHARED_DATABASE_USERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.user }} + name: {{ tpl .Values.database.secrets.user.name . }} + key: {{ tpl .Values.database.secrets.user.key . }} + {{- else if .Values.database.user }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-user + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.actualUsername .Values.database.actualUsername }} + - name: JF_SHARED_DATABASE_ACTUALUSERNAME + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.actualUsername }} + name: {{ tpl .Values.database.secrets.actualUsername.name . }} + key: {{ tpl .Values.database.secrets.actualUsername.key . }} + {{- else if .Values.database.actualUsername }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . 
}}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-actualUsername + {{- end }} + {{- end }} + {{- if or .Values.database.secrets.url .Values.database.url }} + - name: JF_SHARED_DATABASE_URL + valueFrom: + secretKeyRef: + {{- if .Values.database.secrets.url }} + name: {{ tpl .Values.database.secrets.url.name . }} + key: {{ tpl .Values.database.secrets.url.key . }} + {{- else if .Values.database.url }} + {{- if not .Values.xray.unifiedSecretInstallation }} + name: {{ template "xray.fullname" . }}-database-creds + {{- else }} + name: "{{ template "xray.name" . }}-unified-secret" + {{- end }} + key: db-url + {{- end }} + {{- end }} + {{- if .Values.common.rabbitmq.connectionConfigFromEnvironment }} + - name: JF_SHARED_RABBITMQ_USERNAME + value: {{ include "rabbitmq.user" .}} + - name: JF_SHARED_RABBITMQ_URL + value: {{ include "rabbitmq.url" .}} + {{- end }} + - name: XRAY_K8S_ENV + value: "true" + {{- if .Values.common.extraEnvVars }} + {{- tpl .Values.common.extraEnvVars . | nindent 8 }} + {{- end }} + {{- if .Values.persist.extraEnvVars }} + {{- tpl .Values.persist.extraEnvVars . | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.persist.internalPort }} + name: http-persist + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" +{{- if or .Values.common.customVolumeMounts .Values.global.customVolumeMounts }} +{{ tpl (include "xray.customVolumeMounts" .) . | indent 8 }} +{{- end }} +{{- with .Values.persist.customVolumeMounts }} +{{ tpl . $ | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.persist.resources | indent 10 }} +{{- if .Values.persist.startupProbe.enabled }} + startupProbe: +{{ tpl .Values.persist.startupProbe.config . | indent 10 }} +{{- end }} +{{- if .Values.persist.livenessProbe.enabled }} + livenessProbe: +{{ tpl .Values.persist.livenessProbe.config . 
| indent 10 }} +{{- end }} + {{- $mountPath := .Values.xray.persistence.mountPath }} + {{- range .Values.xray.loggers }} + - name: {{ . | replace "_" "-" | replace "." "-" }} + image: {{ include "xray.getImageInfoByValue" (list $ "initContainers") }} + imagePullPolicy: {{ $.Values.initContainers.image.pullPolicy }} + {{- if $.Values.containerSecurityContext.enabled }} + securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 10 }} + {{- end }} + command: + - 'sh' + - '-c' + - 'sh /scripts/tail-log.sh {{ $mountPath }}/log {{ . }}' + volumeMounts: + - name: data-volume + mountPath: {{ $mountPath }} + - name: tail-logger-script + mountPath: /scripts/tail-log.sh + subPath: tail-log.sh + resources: +{{ toYaml $.Values.xray.loggersResources | indent 10 }} + {{- end }} + {{- if .Values.filebeat.enabled }} + - name: {{ .Values.filebeat.name }} + image: "{{ .Values.filebeat.image.repository }}:{{ .Values.filebeat.image.version }}" + imagePullPolicy: {{ .Values.filebeat.image.pullPolicy }} + args: + - "-e" + - "-E" + - "http.enabled=true" + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + {{- end }} + volumeMounts: + - name: filebeat-config + mountPath: /usr/share/filebeat/filebeat.yml + readOnly: true + subPath: filebeat.yml + - name: data-volume + mountPath: "{{ .Values.xray.persistence.mountPath }}" + livenessProbe: +{{ toYaml .Values.filebeat.livenessProbe | indent 10 }} + readinessProbe: +{{ toYaml .Values.filebeat.readinessProbe | indent 10 }} + resources: +{{ toYaml .Values.filebeat.resources | indent 10 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + {{- end }} +{{- if or .Values.common.customSidecarContainers .Values.global.customSidecarContainers }} +{{ tpl (include "xray.customSidecarContainers" .) . 
| indent 6 }} +{{- end }} + {{- if or .Values.xray.nodeSelector .Values.global.nodeSelector }} +{{ tpl (include "xray.nodeSelector" .) . | indent 6 }} + {{- end }} + {{- if .Values.affinity }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else if eq .Values.xray.podAntiAffinity.type "soft" }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.xray.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "xray.name" . }} + release: {{ .Release.Name }} + {{- else if eq .Values.xray.podAntiAffinity.type "hard" }} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.xray.podAntiAffinity.topologyKey }} + labelSelector: + matchLabels: + app: {{ template "xray.name" . }} + release: {{ .Release.Name }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + ########## External secrets ########### + {{- if or .Values.xray.customCertificates.enabled .Values.global.customCertificates.enabled }} + - name: ca-certs + secret: + secretName: {{ default .Values.global.customCertificates.certificateSecretName .Values.xray.customCertificates.certificateSecretName }} + {{- end }} + {{- if .Values.systemYamlOverride.existingSecret }} + - name: systemyaml + secret: + secretName: {{ .Values.systemYamlOverride.existingSecret }} + {{- end }} + ############ Config map, Volumes and Custom Volumes ############## + {{- if .Values.xray.loggers }} + - name: tail-logger-script + configMap: + name: {{ template "xray.fullname" . 
}}-logger + {{- end }} + {{- if not .Values.common.persistence.enabled }} + - name: data-volume + emptyDir: + sizeLimit: {{ .Values.common.persistence.size }} + {{- end }} + {{- if and .Values.common.persistence.enabled .Values.common.persistence.existingClaim }} + - name: data-volume + persistentVolumeClaim: + claimName: {{ .Values.common.persistence.existingClaim }} + {{- end }} + {{- if and .Values.xray.unifiedSecretInstallation (eq (include "xray.checkDuplicateUnifiedCustomVolume" .) "false" ) }} + ######### unifiedSecretInstallation ########### + - name: {{ include "xray.unifiedCustomSecretVolumeName" . }} + secret: + secretName: {{ template "xray.name" . }}-unified-secret + {{- else if not .Values.xray.unifiedSecretInstallation }} + ######### Non unifiedSecretInstallation ########### + {{- if not .Values.systemYamlOverride.existingSecret }} + - name: systemyaml + secret: + secretName: {{ printf "%s-%s" (include "xray.fullname" .) "system-yaml" }} + {{- end }} + {{- end }} + {{- if or .Values.global.rabbitmq.auth.tls.enabled .Values.rabbitmq.auth.tls.enabled }} + - name: rabbitmq-ca-certs + secret: + secretName: {{ template "xray.rabbitmqCustomCertificateshandler" . }} + {{- end }} + +{{- if or .Values.common.customVolumes .Values.global.customVolumes }} +{{ tpl (include "xray.customVolumes" .) . | indent 6 }} +{{- end }} + {{- if .Values.filebeat.enabled }} + - name: filebeat-config + configMap: + name: {{ template "xray.fullname" . }}-filebeat-config + {{- end }} + {{- if .Values.common.configMaps }} + - name: xray-configmaps + configMap: + name: {{ template "xray.fullname" . 
}}-configmaps + {{- end }} +{{- with .Values.common.persistence }} + {{- if and .enabled (not .existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + {{- if .storageClass }} + {{- if (eq "-" .storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .storageClass }}" + {{- end }} + {{- end }} + accessModes: [ "{{ .accessMode }}" ] + resources: + requests: + storage: {{ .size }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-svc.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-svc.yaml new file mode 100644 index 000000000..756db8ed6 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-svc.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "xray.fullname" . }} + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + component: {{ .Values.xray.name }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + {{- with .Values.xray.labels }} +{{ toYaml . | indent 4 }} + {{- end }} +{{- with .Values.server.service }} +{{- if .annotations }} + annotations: + {{- with .annotations }} +{{ toYaml . | indent 4 }} + {{- end }} +{{- end }} +spec: + type: {{ .type }} + {{- if .additionalSpec }} +{{ tpl .additionalSpec $ | indent 2 }} + {{- end }} +{{- end }} + ports: + - port: {{ .Values.server.externalPort }} + protocol: TCP + name: http + targetPort: {{ .Values.server.internalPort }} + - port: {{ .Values.router.externalPort }} + protocol: TCP + name: http-router + targetPort: {{ .Values.router.internalPort }} + selector: + app: {{ template "xray.name" . 
}} + component: {{ .Values.xray.name }} + release: {{ .Release.Name }} +{{- if and (not .Values.splitXraytoSeparateDeployments.gradualUpgrade) .Values.splitXraytoSeparateDeployments.enabled }} + servicename: server +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-system-yaml.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-system-yaml.yaml new file mode 100644 index 000000000..95d82eba9 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-system-yaml.yaml @@ -0,0 +1,15 @@ +{{- if and (not .Values.systemYamlOverride.existingSecret) (not .Values.xray.unifiedSecretInstallation) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "xray.fullname" . }}-system-yaml + labels: + app: {{ template "xray.name" . }} + chart: {{ template "xray.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +type: Opaque +stringData: + system.yaml: | +{{ include "xray.finalSystemYaml" . | nindent 4 }} +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-unified-secret.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-unified-secret.yaml new file mode 100644 index 000000000..51e421228 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/templates/xray-unified-secret.yaml @@ -0,0 +1,70 @@ +{{- if .Values.xray.unifiedSecretInstallation }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "xray.name" . 
}}-unified-secret + labels: + app: "{{ template "xray.name" $ }}" + chart: "{{ template "xray.chart" $ }}" + component: "{{ $.Values.xray.name }}" + heritage: {{ $.Release.Service | quote }} + release: {{ $.Release.Name | quote }} +type: Opaque + +stringData: + +{{- if .Values.common.customSecrets }} +{{- range .Values.common.customSecrets }} + {{ .key }}: | +{{ .data | indent 4 -}} +{{- end }} +{{- end }} + +{{- if not .Values.systemYamlOverride.existingSecret }} + system.yaml: | +{{ include "xray.finalSystemYaml" . | nindent 4 }} +{{- end }} + +data: + {{- if or .Values.xray.masterKey .Values.global.masterKey }} + {{- if not (or .Values.xray.masterKeySecretName .Values.global.masterKeySecretName) }} + master-key: {{ include "xray.masterKey" . | b64enc | quote }} + {{- end }} + {{- end }} + {{- if not (or .Values.xray.joinKey .Values.global.joinKey) }} + {{- if not (or .Values.xray.joinKeySecretName .Values.global.joinKeySecretName) }} + {{ required "\n\n.Values.xray.joinKey/joinKeySecretName or .Values.global.joinKey/joinKeySecretName is required!\n\n" .Values.xray.joinKey }} + {{- end }} + {{- end }} + {{- if or .Values.xray.joinKey .Values.global.joinKey }} + {{- if not (or .Values.xray.joinKeySecretName .Values.global.joinKeySecretName) }} + join-key: {{ include "xray.joinKey" . | b64enc | quote }} + {{- end }} + {{- end }} + + {{- if not (or .Values.xray.executionServiceAesKeySecretName .Values.global.executionServiceAesKeySecretName) }} + {{- if not (or .Values.xray.executionServiceAesKey .Values.global.executionServiceAesKey) }} + execution-service-aes-key: "{{ randAlphaNum 32 | b64enc }}" + {{- else if or .Values.xray.executionServiceAesKey .Values.global.executionServiceAesKey }} + execution-service-aes-key: {{ include "xray.executionServiceAesKey" . 
| b64enc | quote }} + {{- end }} + {{- end }} + + {{- if and (not .Values.database.secrets) (not .Values.postgresql.enabled) }} + {{- if or .Values.database.url .Values.database.user .Values.database.password .Values.database.actualUsername }} + {{- with .Values.database.url }} + db-url: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- with .Values.database.user }} + db-user: {{ tpl . $| b64enc | quote }} + {{- end }} + {{- with .Values.database.password }} + db-password: {{ tpl . $ | b64enc | quote }} + {{- end }} + {{- if .Values.database.actualUsername }} + db-actualUsername: {{ .Values.database.actualUsername | b64enc | quote }} + {{- end }} + {{- end }} + {{- end }} + +{{- end }} diff --git a/stable/jfrog-platform/local_dependancy_charts/xray/values.yaml b/stable/jfrog-platform/local_dependancy_charts/xray/values.yaml new file mode 100644 index 000000000..804c635f0 --- /dev/null +++ b/stable/jfrog-platform/local_dependancy_charts/xray/values.yaml @@ -0,0 +1,1768 @@ +# Default values for Xray HA. +# This is a YAML-formatted file. +# Beware when changing values here. You should know what you are doing! 
+# Access the values with {{ .Values.key.subkey }}
+global:
+  # imageRegistry: releases-docker.jfrog.io
+  # imagePullSecrets:
+  # - myRegistryKeySecretName
+  ## Chart.AppVersion can be overridden using global.versions.xray, common.xrayVersion or image tags
+  ## Note: Order of preference is 1) global.versions 2) common.xrayVersion 3) image tags 4) Chart.AppVersion
+  versions: {}
+  # xray:
+  # initContainers:
+  # router:
+  # joinKey:
+  # masterKey:
+  # joinKeySecretName:
+  # masterKeySecretName:
+  # executionServiceAesKey:
+  # executionServiceAesKeySecretName:
+
+  ## Note: tags customInitContainersBegin,customInitContainers,customVolumes,customVolumeMounts,customSidecarContainers can be used both from global and application level simultaneously
+  # customInitContainersBegin: |
+
+  # customInitContainers: |
+
+  # customVolumes: |
+
+  # customVolumeMounts: |
+
+  # customSidecarContainers: |
+
+  ## certificates added to this secret will be copied to $JFROG_HOME/xray/var/etc/security/keys/trusted directory
+  customCertificates:
+    enabled: false
+    # certificateSecretName:
+
+  ## Applies to xray pods
+  nodeSelector: {}
+
+  ## Applies to platform charts. 
+ rabbitmq: + auth: + tls: + enabled: + autoGenerated: + + xray: + # Rabbitmq settings that are specific to Xray + rabbitmq: + replicaCount: 1 + haQuorum: + enabled: false + waitForPreviousPodsOnInitialStartup: true + vhost: xray_haq + + autoscaling: + keda: + annotations: {} + +deployment: + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + +## String to partially override xray.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override xray.fullname template +## +# fullnameOverride: + +imagePullPolicy: IfNotPresent + +# Init containers +initContainers: + image: + registry: releases-docker.jfrog.io + repository: ubi9/ubi-minimal + tag: 9.5.1736404155 + pullPolicy: IfNotPresent + resources: + requests: + memory: "50Mi" + cpu: "10m" + limits: + memory: "1Gi" + cpu: "1" + +# For supporting pulling from private registries +imagePullSecrets: + # - myRegistryKeySecretName + +## Xray systemYaml override +## This is for advanced usecases where users wants to provide their own systemYaml for configuring xray +## Refer - https://www.jfrog.com/confluence/display/JFROG/Xray+System+YAML +## Note: This will override existing (default) .Values.xray.systemYaml in values.yaml +## Alternatively, systemYaml can be overidden via customInitContainers using external sources like vaults, external repositories etc. Please refer customInitContainer section below for an example. +## Note: Order of preference is 1) customInitContainers 2) systemYamlOverride existingSecret 3) default systemYaml in values.yaml +systemYamlOverride: +## You can use a pre-existing secret by specifying existingSecret + existingSecret: +## The dataKey should be the name of the secret data key created. + dataKey: + +replicaCount: 1 + +## Database configurations +## Use the wait-for-db init container. 
Set to false to skip +waitForDatabase: true + +xray: + name: xray + labels: {} + persistence: + mountPath: /var/opt/jfrog/xray + + # adding minAvailable for Xray Pod Disruption Budget + # minAvailable: 1 + + # unifiedSecretInstallation flag enables single unified secret holding all the xray internal(chart) secrets, It won't be affecting external secrets. + ## Note: unifiedSecretInstallation flag is enabled by true by default from chart version 103.91.x, Users can switch to false to continue with the old way of secret creation. + unifiedSecretInstallation: true + + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + schedulerName: + + # Create a priority class for the Xray pod or use an existing one + # NOTE - Maximum allowed value of a user defined priority is 1000000000 + priorityClass: + create: false + value: 1000000000 + ## Override default name + # name: + ## Use an existing priority class + # existingPriorityClass: + + ## certificates added to this secret will be copied to $JFROG_HOME/xray/var/etc/security/keys/trusted directory + customCertificates: + enabled: false + # certificateSecretName: + + ## Add custom annotations for xray pods + annotations: {} + + ## Xray requires a unique master key + ## You can generate one with the command: + ## 'openssl rand -hex 32' + ## Pass it to helm with '--set xray.masterKey=${MASTER_KEY}' + ## IMPORTANT: You should NOT use the example masterKey for a production deployment! + masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF + ## Alternatively, you can use a pre-existing secret with a key called master-key by specifying masterKeySecretName + # masterKeySecretName: + + ## Join Key to connect to main Artifactory. Mandatory + joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE + + ## Xray AES key used by execution server to the xray server and analysis containers. 
+ ## You can generate one with the command: + ## 'openssl rand -hex 16' + # executionServiceAesKey: + ## Alternatively, you can use a pre-existing secret with a key called execution-service-aes-key by specifying executionServiceAesKeySecretName + # executionServiceAesKeySecretName: + + ## Alternatively, you can use a pre-existing secret with a key called join-key by specifying joinKeySecretName + # joinKeySecretName: + ## If false, all service console logs will not redirect to a common console.log + consoleLog: false + ## Artifactory URL . Mandatory + jfrogUrl: + + ## Mongo details are used only for Manual migration of data from Mongo to Postgres in Xray 2.x to 3.x + # mongoUrl: + # mongoUsername: + # mongoPassword: + + ## Only used if "affinity" is empty + podAntiAffinity: + ## Valid values are "soft" or "hard"; any other value indicates no anti-affinity + type: "soft" + topologyKey: "kubernetes.io/hostname" + + # To enable set `.Values.xray.openMetrics.enabled` to `true` + # Refer - https://www.jfrog.com/confluence/display/JFROG/Open+Metrics + openMetrics: + enabled: false + ## Settings for pushing metrics to Insight - enable filebeat to true + filebeat: + enabled: false + log: + enabled: false + ## Log level for filebeat. Possible values: debug, info, warning, or error. + level: "info" + ## Elasticsearch details for filebeat to connect + elasticsearch: + url: "Elasticsearch url where JFrog Insight is installed For example, http://:8082" + username: "" + password: "" + + ## System YAML entries now reside under files/system.yaml. + ## You can provide the specific values that you want to add or override under 'xray.extraSystemYaml'. + ## For example: + ## extraSystemYaml: + ## shared: + ## logging: + ## consoleLog: + ## enabled: true + ## The entries provided under 'xray.extraSystemYaml' are merged with files/system.yaml to create the final system.yaml. 
+ ## If you have already provided system.yaml under, 'xray.systemYaml', the values in that entry take precedence over files/system.yaml + ## You can modify specific entries with your own value under `xray.extraSystemYaml`, The values under extraSystemYaml overrides the values under 'xray.systemYaml' and files/system.yaml + + extraSystemYaml: {} + + ## systemYaml is intentionally commented and the previous content has been moved under files/system.yaml. + ## You have to add the all entries of the system.yaml file here, and it overrides the values in files/system.yaml. + # systemYaml: + + # Sidecar containers for tailing Xray logs + loggers: [] + # - router-request.log + # - router-service.log + # - router-traefik.log + # - xray-request.log + # - xray-analysis-service.log + # - xray-analysis-metrics.log + # - xray-server-service.log + # - xray-server-metrics.log + # - xray-indexer-service.log + # - xray-indexer-metrics.log + # - xray-analysis-stack.log + # - xray-indexer-stack.log + # - xray-persist-stack.log + # - xray-persist-metrics.log + # - xray-server-stack.log + # - observability-metrics.log + # - observability-request.log + # - observability-service.log + + # Loggers containers resources + loggersResources: {} + # requests: + # memory: "64Mi" + # cpu: "25m" + # limits: + # memory: "128Mi" + # cpu: "50m" + +## Role Based Access +## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ +rbac: + create: false + role: + ## Rules to create. 
It follows the role specification + rules: + - apiGroups: + - '' + resources: + - services + - endpoints + - pods + - pods/log + - events + verbs: + - get + - watch + - list + - apiGroups: + - 'batch' + resources: + - jobs + verbs: + - get + - watch + - list + - create + - delete + +networkpolicy: [] + # Allows all ingress and egress + # - name: xray + # podSelector: + # matchLabels: + # app: xray + # egress: + # - {} + # ingress: + # - {} + # Uncomment to allow only xray pods to communicate with postgresql (if postgresql.enabled is true) + # - name: postgres + # podSelector: + # matchLabels: + # app.kubernetes.io/name: postgresql + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: xray + # Uncomment to allow only xray pods to communicate with rabbitmq (if rabbitmq.enabled is true) + # - name: rabbitmq + # podSelector: + # matchLabels: + # app.kubernetes.io/name: rabbitmq + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: xray + +## Affinity rules +nodeSelector: {} +affinity: {} +tolerations: [] + +## Apply horizontal pod auto scaling on Xray pods +## Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 70 + targetMemoryUtilizationPercentage: 90 + ## Specify if using the keda hpa or regular basic hpa + ## Note: keda should be installed on the target cluster + ## Ref: https://keda.sh/docs/2.10/deploy/ + keda: + enabled: false + annotations: {} + scaleUp: + stabilizationWindowSeconds: 90 + policies: + - type: Pods + value: 3 + periodSeconds: 30 + scaleDown: + stabilizationWindowSeconds: 90 + policies: + - type: Pods + value: 1 + periodSeconds: 30 + pollingInterval: 10 + cooldownPeriod: 10 + queues: + - name: analysis + value: "100" + - name: index + value: "100" + - name: persist + value: "100" + - name: policyEnforcer + value: "100" + - name: impactAnalysis + value: "100" + +## Service Account +## Ref: 
https://kubernetes.io/docs/admin/service-accounts-admin/ +## +serviceAccount: + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + ## Service Account annotations + annotations: {} + ## Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: true + +## @param podSecurityContext.enabled enable the pod's Security Context +podSecurityContext: + enabled: true + runAsNonRoot: true + runAsUser: 1035 + runAsGroup: 1035 + fsGroup: 1035 + # fsGroupChangePolicy: "Always" + # seLinuxOptions: {} + +## @param containerSecurityContext.enabled enable the container's Security Context +containerSecurityContext: + enabled: true + runAsNonRoot: true + allowPrivilegeEscalation: false + capabilities: + drop: + - NET_RAW + + +# PostgreSQL +## Please note bundled postgresql is not recommended for production use. +## Configuration values for the postgresql dependency +## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md +## +postgresql: + enabled: true + image: + registry: releases-docker.jfrog.io + repository: bitnami/postgresql + tag: 15.6.0-debian-11-r16 + auth: + username: "xray" + password: "" + database: "xraydb" + primary: + extendedConfiguration: | + max_connections = 1500 + listen_addresses = '*' + persistence: + size: 300Gi + service: + ports: + postgresql: 5432 + nodeSelector: {} + affinity: {} + tolerations: [] + ## @param primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + readReplicas: + nodeSelector: {} + affinity: {} + tolerations: [] + +## If NOT using the PostgreSQL in this chart (postgresql.enabled=false), +database: + type: "postgresql" + driver: "org.postgresql.Driver" + ## If you 
would like this chart to create the secret containing the database url, user, password - use these below values + url: + user: + password: + ## When working with Azure managed PG you have to provide the actualUsername. The application will pick actualUsername and will use it in some cases where it is needed + actualUsername: + ## If you have existing Kubernetes secrets containing db credentials, use + ## these values + secrets: {} + # user: + # name: "xray-database-creds" + # key: "db-user" + # password: + # name: "xray-database-creds" + # key: "db-password" + # url: + # name: "xray-database-creds" + # key: "db-url" + # actualUsername: + # name: "xray-database-creds" + # key: "db-actualUsername" + +# RabbitMQ +## Configuration values for the rabbitmq dependency +## ref: https://github.com/bitnami/charts/blob/master/bitnami/rabbitmq/README.md +## +rabbitmq: + enabled: true + ## Enable the flag if the feature flags in rabbitmq is enabled manually + rabbitmqUpgradeReady: false + replicaCount: 1 + rbac: + create: true + image: + registry: releases-docker.jfrog.io + repository: bitnami/rabbitmq + tag: 3.13.7-debian-12-r5 + extraPlugins: "rabbitmq_management" + + auth: + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? 
+ ## @param auth.tls.sslOptionsPassword.enabled Enable usage of password for private Key + ## @param auth.tls.sslOptionsPassword.existingSecret Name of existing Secret containing the sslOptionsPassword + ## @param auth.tls.sslOptionsPassword.key Enable Key referring to sslOptionsPassword in Secret specified in auth.tls.sslOptionsPassword.existingSecret + ## @param auth.tls.sslOptionsPassword.password Use this string as Password. If set, auth.tls.sslOptionsPassword.existingSecret and auth.tls.sslOptionsPassword.key are ignored + ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## @param auth.tls.overrideCaCertificate Existing secret with certificate content be mounted instead of the `ca.crt` coming from caCertificate or existingSecret/existingSecretFullChain. + ## + tls: + enabled: false + # By default TLS certs are autogenerated, if you wish to add your own certs, please set this to false. + autoGenerated: true + failIfNoPeerCert: false + sslOptionsVerify: verify_peer + failIfNoCert: false + sslOptionsPassword: + enabled: false + existingSecret: "" + key: "" + password: "" + + caCertificate: + serverCertificate: + serverKey: + + # Rabbitmq tls-certs secret name, as by default it will have {{ .Release.Name }}-rabbitmq-certs. + existingSecret: + existingSecretFullChain: false + overrideCaCertificate: "" + username: guest + password: password + ## @param auth.securePassword Whether to set the RabbitMQ password securely. This is incompatible with loading external RabbitMQ definitions and 'true' when not setting the auth.password parameter. 
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + securePassword: false + ## Alternatively, you can use a pre-existing secret with a key called rabbitmq-password by specifying existingPasswordSecret + # existingPasswordSecret: + erlangCookie: XRAYRABBITMQCLUSTER + # existingErlangSecret: + # memoryHighWatermark: + # ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + # ## + # enabled: false + # ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + # ## + # type: "absolute" + # ## Memory high watermark value. + # ## @param memoryHighWatermark.value Memory high watermark value + # ## The default value of 0.4 stands for 40% of available RAM + # ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + # ## You can also use an absolute value, e.g.: 256MB + # ## + # value: 700MB + # resources: + # requests: + # memory: "512Mi" + # cpu: "500m" + # limits: + # memory: "1Gi" + # cpu: "1" + ## RabbitMQ maximum available scheduler threads and online scheduler threads. By default it will create a thread per CPU detected, with the following parameters you can tune it manually. 
+ maxAvailableSchedulers: null + onlineSchedulers: null + ## To support upgrade from 3.8.x to 3.11.x , featureFlags are needed + ## ref: https://blog.rabbitmq.com/posts/2022/07/required-feature-flags-in-rabbitmq-3.11/ + featureFlags: drop_unroutable_metric,empty_basic_get_metric,implicit_default_bindings,maintenance_mode_status,quorum_queue,stream_queue,user_limits,virtual_host_metadata + ## Additional environment variables to set + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq#adding-extra-environment-variables + extraEnvVars: + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: "+S 2:2 +sbwt none +sbwtdcpu none +sbwtdio none" + service: + ports: + amqp: 5672 + amqpTls: 5671 + manager: 15672 + external: + username: + password: + url: + erlangCookie: + secrets: {} + # username: + # name: "xray-rabbitmq-creds" + # key: "username" + # password: + # name: "xray-rabbitmq-creds" + # key: "password" + # url: + # name: "xray-rabbitmq-creds" + # key: "url" + persistence: + enabled: true + accessMode: ReadWriteOnce + size: 20Gi + ## Load Definitions - https://www.rabbitmq.com/management.html#load-definitions + # ref : https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq#load-definitions + extraSecretsPrependReleaseName: true + extraSecrets: + load-definition: + load_definition.json: | + { + "permissions": [ + { + "user": "{{ .Values.auth.username }}", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" + }, + { + "user": "{{ .Values.auth.username }}", + "vhost": "{{ .Values.global.xray.rabbitmq.haQuorum.vhost }}", + "configure": ".*", + "write": ".*", + "read": ".*" + } + ], + "users": [ + { + "name": "{{ .Values.auth.username }}", + "password": "{{ .Values.auth.password }}", + "tags": "administrator" + } + ], + "vhosts": [ + { + "name": "/" + }, + { + "name": "{{ .Values.global.xray.rabbitmq.haQuorum.vhost }}" + } + ], + "policies": [ + {{- if not .Values.global.xray.rabbitmq.haQuorum.enabled }} + { + "name": "ha-all", 
+ "apply-to": "all", + "pattern": ".*", + "vhost": "/", + "definition": { + "ha-mode": "all", + "ha-sync-mode": "automatic" + } + } + {{- end }} + ] + } + loadDefinition: + enabled: true + existingSecret: '{{ .Release.Name }}-load-definition' + nodeSelector: {} + tolerations: [] + affinity: {} + containerSecurityContext: + enabled: true + allowPrivilegeEscalation: false + ## Upgrade of rabbitmq from 3.8.x to 3.11.x needs the feature flags to be enabled. + ## Ref: (https://blog.rabbitmq.com/posts/2022/07/required-feature-flags-in-rabbitmq-3.11/ + ## migration enable will perform `rabbitmqctl enable_feature_flag all` command on the existing rabbitmq before starting the upgrade + migration: + ## Migration is required to be performed only once hence this option can be disabled once the feature flags are enabled in rabbitmq. + enabled: true + ## Another uses of migration hook are: + ## - Deleting StatefulSet for allowing updating certain fields that require it: + ## Changing podManagementPolicy OrderedReady -> Parallel requires deleting stateful set + ## - Deleting ha-all mirror policy on migrating to Quorum Queues + deleteStatefulSetToAllowFieldUpdate: + enabled: false + removeHaPolicyOnMigrationToHaQuorum: + enabled: false + image: + registry: releases-docker.jfrog.io + repository: bitnami/kubectl + tag: 1.32.0 + ## Service account for the pre-upgrade hook to perform rabbitmq migration + serviceAccount: + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + ## Explicitly mounts the API credentials for the Service Account + ## Service Account annotations + annotations: {} + automountServiceAccountToken: true + rbac: + create: true + role: + ## Rules to create. 
It follows the role specification + rules: + - apiGroups: + - "" + resources: + - pods/exec + - pods + verbs: + - create + - get + - list + - apiGroups: + - "apps" + resources: + - statefulsets + verbs: + - get + - list + - delete + + + # This is automatically set based on rabbitmqTLS enabled flag. + extraConfiguration: |- + management.listener.ssl = {{ template "xray.rabbitmq.isManagementListenerTlsEnabledInContext" . }} + + initContainers: | + {{- if and .Values.global.xray.rabbitmq.haQuorum.enabled .Values.global.xray.rabbitmq.haQuorum.waitForPreviousPodsOnInitialStartup }} + - name: "wait-for-previous-pods" + image: "{{ template "rabbitmq.image" . }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + env: + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . }} + key: rabbitmq-erlang-cookie + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: {{ printf "%s-%s" (include "common.names.fullname" .) 
(default "headless" .Values.servicenameOverride) }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "{{ .Values.persistence.mountPath }}/$(RABBITMQ_NODE_NAME)" + command: + - /bin/bash + args: + - -ecx + - | + echo $HOSTNAME + if [[ $HOSTNAME == *-0 ]]; then + exit 0 + fi + if [ -d "$RABBITMQ_MNESIA_DIR" ]; then + exit 0 + fi + + # wait for zero pod to start running and accept requests + zero_pod_name=$(echo $MY_POD_NAME | sed -E "s/-[[:digit:]]$/-0/") + zero_pod_node_name=$(echo "$RABBITMQ_NODE_NAME" | sed -E "s/^rabbit@$MY_POD_NAME/rabbit@$zero_pod_name/") + maxIterations=60 + i=1 + while true; do + rabbitmq-diagnostics -q check_running -n $zero_pod_node_name --longnames --erlang-cookie $RABBITMQ_ERL_COOKIE && \ + rabbitmq-diagnostics -q check_local_alarms -n $zero_pod_node_name --longnames --erlang-cookie $RABBITMQ_ERL_COOKIE && \ + break || sleep 5; + if [ "$i" == "$maxIterations" ]; then exit 1; fi + i=$((i+1)) + done; + + # node x waits for x previous nodes to join cluster (since node number is zero based) + nodeSerialNum=$(echo "$MY_POD_NAME" | grep -o "[0-9]*$") + timeoutSeconds=180 + rabbitmqctl --erlang-cookie $RABBITMQ_ERL_COOKIE \ + --node $zero_pod_node_name --longnames \ + await_online_nodes $nodeSerialNum \ + --timeout $timeoutSeconds || exit 1 + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- 
end }} + {{- end }} + +# Common Xray settings +common: + ## Note that by default we use appVersion to get image tag + # xrayVersion: + + # Spread Xray pods evenly across your nodes or some other topology + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: kubernetes.io/hostname + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: '{{ template "xray.name" . }}' + # role: '{{ template "xray.name" . }}' + # release: "{{ .Release.Name }}" + + # Xray configuration to be written to xray_config.yaml + xrayConfig: + stdOutEnabled: true + indexAllBuilds: false + support-router: true + + # Use rabbitmq connection config from environment variables. + # If false, then connection details should be set directly in system.yaml (systemYaml section). + # When using external rabbitmq, set this to false + rabbitmq: + connectionConfigFromEnvironment: true + waitForReplicasQuorumOnStartup: true + + ## Custom command to run before Xray startup. Runs BEFORE any microservice-specific preStartCommand + preStartCommand: + + ## Add custom volumes + # If .Values.xray.unifiedSecretInstallation is true then secret name should be '{{ template "xray.name" . }}-unified-secret'. + customVolumes: | + # - name: custom-script + # configMap: + # name: custom-script + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + # Add any list of configmaps to Xray + configMaps: | + # posthook-start.sh: |- + # echo "This is a post start script" + # posthook-end.sh: |- + # echo "This is a post end script" + + ## Add custom init containers execution before predefined init containers + customInitContainersBegin: | + # - name: "custom-setup" + # image: {{ include "xray.getImageInfoByValue" (list . 
"initContainers") }} + # imagePullPolicy: "{{ .Values.initContainers.image.pullPolicy }}" + # {{- if .Values.containerSecurityContext.enabled }} + # securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + # {{- end }} + # command: + # - 'sh' + # - '-c' + # - 'touch {{ .Values.xray.persistence.mountPath }}/example-custom-setup' + # volumeMounts: + # - mountPath: "{{ .Values.xray.persistence.mountPath }}" + # name: data-volume + + ## Add custom init containers execution after predefined init containers + customInitContainers: | + # - name: "custom-systemyaml-setup" + # image: "{{ include "xray.getImageInfoByValue" (list . "initContainers") }}" + # imagePullPolicy: "{{ .Values.initContainers.image.pullPolicy }}" + # {{- if .Values.containerSecurityContext.enabled }} + # securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . | nindent 10 }} + # {{- end }} + # command: + # - 'sh' + # - '-c' + # - 'wget -O {{ .Values.xray.persistence.mountPath }}/etc/system.yaml https:///systemyaml' + # volumeMounts: + # - mountPath: "{{ .Values.xray.persistence.mountPath }}" + # name: data-volume + + ## Add custom sidecar containers + # - The provided example uses a custom volume (customVolumes) + # - The provided example shows running container as root (id 0) + customSidecarContainers: | + # - name: "sidecar-list-etc" + # image: {{ include "xray.getImageInfoByValue" (list . "initContainers") }} + # imagePullPolicy: {{ .Values.initContainers.image.pullPolicy }} + # {{- if .Values.containerSecurityContext.enabled }} + # securityContext: {{- tpl (omit .Values.containerSecurityContext "enabled" | toYaml) . 
| nindent 10 }} + # {{- end }} + # command: + # - 'sh' + # - '-c' + # - > + # while true; do echo "running in sidecar"; sleep 2; done + # volumeMounts: + # - mountPath: "{{ .Values.xray.persistence.mountPath }}" + # name: data-volume + # resources: + # requests: + # memory: "32Mi" + # cpu: "50m" + # limits: + # memory: "128Mi" + # cpu: "100m" + + # If .Values.xray.unifiedSecretInstallation is true then secret name should be '{{ template "xray.name" . }}-unified-secret'. + customSecrets: + # - name: custom-secret + # key: custom-secret.yaml + # data: > + # custom_secret_config: + # parameter1: value1 + # parameter2: value2 + # - name: custom-secret2 + # key: custom-secret2.config + # data: | + # here the custom secret 2 config + + persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + + accessMode: ReadWriteOnce + ## Container storage limit if persistence.enabled: false + ## Otherwise PVC size + size: 50Gi + ## server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## @param extraEnvVars Extra environment variables to add to xray containers + ## E.g: + ## extraEnvVars: + ## - name: FOO + ## value: BAR + ## + extraEnvVars: + +analysis: + name: xray-analysis + ## Note that by default we use appVersion to get image tag/version + image: + registry: releases-docker.jfrog.io + repository: jfrog/xray-analysis + # tag: + internalPort: 7000 + externalPort: 7000 + annotations: {} + extraEnvVars: + + # Add lifecycle hooks for the analysis pod + lifecycle: {} + # postStart: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + livenessProbe: + enabled: true + config: | + httpGet: + path: /api/v1/system/liveness + port: {{ .Values.analysis.internalPort }} + initialDelaySeconds: {{ if semverCompare " /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + livenessProbe: + enabled: true + config: | + exec: + command: + - sh + - -c + - curl -s -k --fail --max-time {{ .Values.probes.timeoutSeconds }} http://localhost:{{ .Values.sbom.internalPort }}/api/v1/system/liveness + initialDelaySeconds: {{ if semverCompare " /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + livenessProbe: + enabled: true + config: | + exec: + 
command: + - sh + - -c + - curl -s -k --fail --max-time {{ .Values.probes.timeoutSeconds }} http://localhost:{{ .Values.panoramic.internalPort }}/api/v1/system/liveness + initialDelaySeconds: {{ if semverCompare " /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + livenessProbe: + enabled: true + config: | + exec: + command: + - sh + - -c + - curl -s -k --fail --max-time {{ .Values.probes.timeoutSeconds }} http://localhost:{{ .Values.policyenforcer.internalPort }}/api/v1/system/liveness + initialDelaySeconds: {{ if semverCompare " /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + livenessProbe: + enabled: true + config: | + httpGet: + path: /api/v1/system/liveness + port: {{ .Values.indexer.internalPort }} + initialDelaySeconds: {{ if semverCompare " /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + livenessProbe: + enabled: true + config: | + httpGet: + path: /api/v1/system/liveness + port: {{ .Values.persist.internalPort }} + initialDelaySeconds: {{ if semverCompare " /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + + # mailServer: "" + # indexAllBuilds: false + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + 
service: + type: ClusterIP + name: xray + annotations: {} + ## Provide additional spec to xray service + ## Example: + ## additionalSpec: | + ## customKey: customVal + ## + additionalSpec: | + + statefulset: + annotations: {} + + livenessProbe: + enabled: true + config: | + httpGet: + path: /api/v1/system/liveness + port: {{ .Values.server.internalPort }} + initialDelaySeconds: {{ if semverCompare " /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + + annotations: {} + + ## Add custom volumesMounts + customVolumeMounts: | + # - name: custom-script + # mountPath: /scripts/script.sh + # subPath: script.sh + + livenessProbe: + enabled: true + config: | + httpGet: + path: /router/api/v1/system/liveness + port: {{ .Values.router.internalPort }} + scheme: {{ include "xray.scheme" . | upper }} + initialDelaySeconds: {{ if semverCompare " /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + extraEnvVars: + + livenessProbe: + enabled: true + config: | + httpGet: + path: observability/api/v1/system/liveness + port: 8082 + scheme: {{ include "xray.scheme" . | upper}} + initialDelaySeconds: {{ if semverCompare "/dev/null + exit_status=$? + if [[ $exit_status -eq 0 ]]; then + ready=true + echo "catalogdb database is available" + else + echo "Database not ready, retrying..." + current_time=$(date +%s) + elapsed_time=$(( current_time - start_time )) + if [[ $elapsed_time -ge 600 ]]; then + echo "Timeout reached: PostgreSQL did not become available within 10 minutes." + exit 1 + fi + fi + sleep 1 + done + {{- end -}} + {{- end -}} + enabled: false + ## Catalog db creation in xray bundled postgresql. If set to true, then the catalog db will be created in xray bundled postgresql. 
+ createCatalogDb: + enabled: true + image: releases-docker.jfrog.io/postgres:15.6-alpine + ## Change database connection details to external database. Bundled postgresql is not recomended for production use. + ## When using bundled postgresql, provide the same credentials as used by xray. + ## Xray post upgrade hook will create the catalog db in the bundled postgresql. + ## Catalog customInitContainers will wait for the database to be available before starting the catalog. (Only applicable for bundled postgresql) + database: + url: "postgres://{{ .Release.Name }}-postgresql:5432/catalogdb?sslmode=disable" + user: xray + password: "" \ No newline at end of file From 422cb1535adf140ebf03433c9d13e02ffaa47aff Mon Sep 17 00:00:00 2001 From: grig777 Date: Mon, 3 Mar 2025 13:28:47 -0500 Subject: [PATCH 2/4] Dissable github action until we enable internal runners for public --- .github/workflows/deploy-helm-chart.yml | 54 ++++++++++++------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/.github/workflows/deploy-helm-chart.yml b/.github/workflows/deploy-helm-chart.yml index 722a421b7..5b757d5dc 100644 --- a/.github/workflows/deploy-helm-chart.yml +++ b/.github/workflows/deploy-helm-chart.yml @@ -1,30 +1,30 @@ -name: Deploy Helm Chart +# name: Deploy Helm Chart -on: - workflow_dispatch: - pull_request: - branches: ["release"] - paths: - - stable/jfrog-platform/** - push: - branches: ["release"] - paths: - - stable/jfrog-platform/** +# on: +# workflow_dispatch: +# pull_request: +# branches: ["release"] +# paths: +# - stable/jfrog-platform/** +# push: +# branches: ["release"] +# paths: +# - stable/jfrog-platform/** -jobs: - check: - runs-on: forge-amd64-dagger - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Checkout elevation-data - uses: actions/checkout@v4 - with: - repository: fastly/elevation-data +# jobs: +# check: +# runs-on: forge-amd64-dagger +# steps: +# - name: Checkout +# uses: actions/checkout@v4 +# - name: Checkout 
elevation-data +# uses: actions/checkout@v4 +# with: +# repository: fastly/elevation-data - - name: Conftest - uses: fastly/forge/actions/dagger@actions-v1 - with: - verb: call - module: .forge/dagger-modules/standard/registry/ - args: --vault-token="env:VAULT_TOKEN" helm elevation-conftest --chart ./jfrog-helm-charts/stable/jfrog-platform/ --additional-values ./elevation-data/workloads/dev-usc1/jfrog-platform/jfrog-platform.yaml +# - name: Conftest +# uses: fastly/forge/actions/dagger@actions-v1 +# with: +# verb: call +# module: .forge/dagger-modules/standard/registry/ +# args: --vault-token="env:VAULT_TOKEN" helm elevation-conftest --chart ./jfrog-helm-charts/stable/jfrog-platform/ --additional-values ./elevation-data/workloads/dev-usc1/jfrog-platform/jfrog-platform.yaml From 6b5604b75e9ea2ccc057338af763c9a8c2a1eaa1 Mon Sep 17 00:00:00 2001 From: grig777 Date: Mon, 3 Mar 2025 13:34:19 -0500 Subject: [PATCH 3/4] Fix merge --- .github/workflows/deploy-helm-chart.yml | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/.github/workflows/deploy-helm-chart.yml b/.github/workflows/deploy-helm-chart.yml index 5b757d5dc..885b03b3c 100644 --- a/.github/workflows/deploy-helm-chart.yml +++ b/.github/workflows/deploy-helm-chart.yml @@ -22,9 +22,18 @@ # with: # repository: fastly/elevation-data -# - name: Conftest -# uses: fastly/forge/actions/dagger@actions-v1 -# with: -# verb: call -# module: .forge/dagger-modules/standard/registry/ -# args: --vault-token="env:VAULT_TOKEN" helm elevation-conftest --chart ./jfrog-helm-charts/stable/jfrog-platform/ --additional-values ./elevation-data/workloads/dev-usc1/jfrog-platform/jfrog-platform.yaml + # - name: Conftest + # uses: fastly/forge/actions/dagger@actions-v1 + # if: github.ref != 'refs/heads/release' + # with: + # verb: call + # module: .forge/dagger-modules/standard/registry/ + # args: --vault-token="env:VAULT_TOKEN" helm elevation-conftest --chart ./jfrog-helm-charts/stable/jfrog-platform/ 
--additional-values ./elevation-data/workloads/dev-usc1/jfrog-platform/jfrog-platform.yaml + + # - name: Conftest + # uses: fastly/forge/actions/dagger@actions-v1 + # if: github.ref == 'refs/heads/release' + # with: + # verb: call + # module: .forge/dagger-modules/standard/registry/ + # args: --vault-token="env:VAULT_TOKEN" helm push --chart ./jfrog-helm-charts/stable/jfrog-platform/ --additional-values ./elevation-data/workloads/dev-usc1/jfrog-platform/jfrog-platform.yaml From 2da305440ddcf61297544ded71075d8ab79e191c Mon Sep 17 00:00:00 2001 From: grig777 Date: Mon, 3 Mar 2025 13:40:33 -0500 Subject: [PATCH 4/4] Add internal change log --- stable/jfrog-platform/INTERNAL_CHAGELOG.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 stable/jfrog-platform/INTERNAL_CHAGELOG.md diff --git a/stable/jfrog-platform/INTERNAL_CHAGELOG.md b/stable/jfrog-platform/INTERNAL_CHAGELOG.md new file mode 100644 index 000000000..0efd862f0 --- /dev/null +++ b/stable/jfrog-platform/INTERNAL_CHAGELOG.md @@ -0,0 +1,2 @@ +* Added rabbitmq dependency locally to modify chart template to configure EmtpyDisk size to pass elevation conftest +* Added xray local dependency until templating issue fixed when rabbitmq credential secret set https://github.com/jfrog/charts/pull/1968 \ No newline at end of file