diff --git a/README.containers.md b/README.containers.md index 296b914..755bf9e 100644 --- a/README.containers.md +++ b/README.containers.md @@ -18,6 +18,7 @@ There are a number of containers that make up the application. Here are quick de 12. [Storage Container (blackduck-storage)](#-storage-container-blackduck-storage) 13. [Web App Container (blackduck-webapp)](#-web-app-container-blackduck-webapp) 14. [Web Server Container (blackduck-nginx)](#-web-server-container-blackduck-nginx) +15. [RL Service Container (rl-service)](#-rl-service-container-rl-service) # Web App Container (blackduck-webapp) ---- @@ -650,3 +651,40 @@ This container will need to expose port 8443 to other containers that will link This container runs as UID 100. If the container is started as UID 0 (root) then the user will be switched to UID 100:root before executing its main process. This container is also able to be started as a random UID as long as it is also started within the root group (GID/fsGroup 0). + +# RL Service Container (rl-service) +---- + +## Container Description + +This container analyzes binary files for malware. +This container is only used if Black Duck - ReversingLabs is enabled. + +## Scalability + +This container can be scaled. + +## Links/Ports + +This container needs to connect to these containers/services: +* cfssl +* logstash +* rabbitmq +* storage +* scan +* registration + +## Alternate Host Name Environment Variables + +It may be useful to set host names for these containers, that are not the Docker Swarm defaults, when running in other types of orchestrations. These environment variables can be set to override the default host names: + +* cfssl: $HUB_CFSSL_HOST +* logstash: $HUB_LOGSTASH_HOST +* rabbitmq: $RABBIT_MQ_HOST +* storage: $BLACKDUCK_STORAGE_HOST +* scan: $HUB_SCAN_HOST +* registration: $HUB_REGISTRATION_HOST + +## Users/Groups + +This container runs as UID 1000 (rlservice username) diff --git a/README.md b/README.md index 9059892..47e22f9 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ This repository contains orchestration files and documentation for deploying Black Duck Docker containers. -## Location of Black Duck 2024.1.1 archive: +## Location of Black Duck 2024.4.0 archive: -https://github.com/blackducksoftware/hub/archive/v2024.1.1.tar.gz +https://github.com/blackducksoftware/hub/archive/v2024.4.0.tar.gz NOTE: @@ -44,6 +44,7 @@ https://github.com/blackducksoftware/hub/releases * https://hub.docker.com/r/sigsynopsys/bdba-worker/ * https://hub.docker.com/r/blackducksoftware/rabbitmq/ * https://hub.docker.com/r/blackducksoftware/blackduck-webui/ +* https://hub.docker.com/r/blackducksoftware/rl-service/ # Running Black Duck in Docker diff --git a/docker-swarm/README.md b/docker-swarm/README.md index 3d90dc3..c11faa2 100644 --- a/docker-swarm/README.md +++ b/docker-swarm/README.md @@ -95,6 +95,21 @@ them unless this flag is added to the command above: --with-registry-auth ``` +## Running with ReversingLabs Enabled + +Note: This command might require being run as either a root user, a user in the docker group, or with 'sudo'. + +``` +docker stack deploy --compose-file docker-compose.yml -c docker-compose.rl.yml hub +``` + +There are some versions of docker where if the images live in a private repository, docker stack will not pull +them unless this flag is added to the command above: + +``` +--with-registry-auth +``` + ## Running with External PostgreSQL Hub can be run using a PostgreSQL instance other than the provided hub-postgres docker image. 
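As a quick illustration of the alternate host name variables documented for the rl-service above, a minimal override along the following lines could be placed in docker-compose.local-overrides.yml (the override file named in docker-compose.rl.yml). This is a sketch only: the hostname values are placeholders, not defaults shipped with this repository.

```
# Hypothetical docker-compose.local-overrides.yml fragment: point the rlservice
# at non-default hostnames using the variables listed in the rl-service README
# section above. All hostname values below are illustrative placeholders.
version: '3.6'
services:
  rlservice:
    environment:
      HUB_CFSSL_HOST: 'cfssl.internal.example'
      HUB_LOGSTASH_HOST: 'logstash.internal.example'
      RABBIT_MQ_HOST: 'rabbitmq.internal.example'
      BLACKDUCK_STORAGE_HOST: 'storage.internal.example'
      HUB_SCAN_HOST: 'scan.internal.example'
      HUB_REGISTRATION_HOST: 'registration.internal.example'
```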
@@ -231,6 +246,24 @@ Added definition: reservations: {cpus: '1', memory: 4096M} ``` +### Changing the Default ReversingLabs Memory Limits + +The default memory limits allow files up to 6GB to successfully scan. Additional memory and CPUs will potentially speed up scan times. + +The following configuration example will update the container memory limits from 6GB to 8GB. These configuration values can be changed +in the 'docker-compose.rl.yml': + + +Added definition: + +``` + rlservice: + deploy: + resources: + limits: {cpus: '2', memory: 8192M} + reservations: {cpus: '2', memory: 8192M} +``` + ## Configuration There are several additional options that can be user-configured. This section describes these: @@ -266,6 +299,7 @@ There are currently several containers that need access to services hosted by Bl * registration * scan * webapp +* rl-service If a proxy is required for external internet access you'll need to configure it. @@ -291,6 +325,7 @@ There are several containers that will require the proxy password: * registration * scan * webapp +* rl-service #### LDAP Trust Store Password @@ -312,6 +347,7 @@ The proxy password secret will need to be added to the services: * registration * scan * webapp +* rl-service In each of these service sections, you'll need to add: @@ -524,6 +560,8 @@ For each of the services below, add the secret by * webapp * registration +Note: The rl-service does not support proxies using certificates. + ``` secrets: - HUB_PROXY_CERT_FILE diff --git a/docker-swarm/bin/hub_add_replication_user.sh b/docker-swarm/bin/hub_add_replication_user.sh index 8ff448c..dfba44b 100755 --- a/docker-swarm/bin/hub_add_replication_user.sh +++ b/docker-swarm/bin/hub_add_replication_user.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_POSTGRES_VERSION=${HUB_POSTGRES_VERSION:-14-1.21} +HUB_POSTGRES_VERSION=${HUB_POSTGRES_VERSION:-14-1.22} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-swarm/bin/hub_create_data_dump.sh b/docker-swarm/bin/hub_create_data_dump.sh index 58e258b..1669df6 100755 --- a/docker-swarm/bin/hub_create_data_dump.sh +++ b/docker-swarm/bin/hub_create_data_dump.sh @@ -5,8 +5,8 @@ # 2. The database container has been properly initialized. 
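The proxy sections above add rl-service to the list of services that need the proxy password secret. A hedged sketch of that addition in docker-compose.local-overrides.yml follows; the secret name HUB_PROXY_PASSWORD_FILE is assumed to match the one used for the other listed services and should be checked against your deployment.

```
# Hypothetical docker-compose.local-overrides.yml fragment: attach the proxy
# password secret to the rlservice, mirroring the other services listed above.
# The secret name HUB_PROXY_PASSWORD_FILE is an assumption, not taken from
# docker-compose.rl.yml.
version: '3.6'
services:
  rlservice:
    secrets:
      - HUB_PROXY_PASSWORD_FILE
```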
HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} -HUB_POSTGRES_VERSION=${HUB_POSTGRES_VERSION:-14-1.21} -HUB_VERSION=${HUB_VERSION:-2024.1.1} +HUB_POSTGRES_VERSION=${HUB_POSTGRES_VERSION:-14-1.22} +HUB_VERSION=${HUB_VERSION:-2024.4.0} OPT_FORCE= OPT_LIVE_SYSTEM= OPT_MAX_CPU=${MAX_CPU:-1} diff --git a/docker-swarm/bin/hub_db_migrate.sh b/docker-swarm/bin/hub_db_migrate.sh index e7f5b35..d698090 100755 --- a/docker-swarm/bin/hub_db_migrate.sh +++ b/docker-swarm/bin/hub_db_migrate.sh @@ -14,7 +14,7 @@ set -o errexit HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} -HUB_POSTGRES_VERSION=${HUB_POSTGRES_VERSION:-14-1.21} +HUB_POSTGRES_VERSION=${HUB_POSTGRES_VERSION:-14-1.22} OPT_MAX_CPU=${MAX_CPU:-1} OPT_NO_DATABASE=${NO_DATABASE:-} OPT_NO_STORAGE=${NO_STORAGE:-} diff --git a/docker-swarm/bin/hub_replication_changepassword.sh b/docker-swarm/bin/hub_replication_changepassword.sh index 4324053..1484175 100755 --- a/docker-swarm/bin/hub_replication_changepassword.sh +++ b/docker-swarm/bin/hub_replication_changepassword.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_POSTGRES_VERSION=${HUB_POSTGRES_VERSION:-14-1.21} +HUB_POSTGRES_VERSION=${HUB_POSTGRES_VERSION:-14-1.22} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-swarm/bin/hub_reportdb_changepassword.sh b/docker-swarm/bin/hub_reportdb_changepassword.sh index cc36770..158a1c1 100755 --- a/docker-swarm/bin/hub_reportdb_changepassword.sh +++ b/docker-swarm/bin/hub_reportdb_changepassword.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_POSTGRES_VERSION=${HUB_POSTGRES_VERSION:-14-1.21} +HUB_POSTGRES_VERSION=${HUB_POSTGRES_VERSION:-14-1.22} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-swarm/bin/system_check.sh b/docker-swarm/bin/system_check.sh index b32eae5..785827a 100755 --- a/docker-swarm/bin/system_check.sh +++ b/docker-swarm/bin/system_check.sh @@ -41,7 +41,7 @@ set -o noglob readonly NOW="$(date +"%Y%m%dT%H%M%S%z")" readonly NOW_ZULU="$(date -u +"%Y%m%dT%H%M%SZ")" -readonly HUB_VERSION="${HUB_VERSION:-2024.1.1}" +readonly HUB_VERSION="${HUB_VERSION:-2024.4.0}" readonly OUTPUT_FILE="${SYSTEM_CHECK_OUTPUT_FILE:-system_check_${NOW}.txt}" readonly PROPERTIES_FILE="${SYSTEM_CHECK_PROPERTIES_FILE:-${OUTPUT_FILE%.txt}.properties}" readonly SUMMARY_FILE="${SYSTEM_CHECK_SUMMARY_FILE:-${OUTPUT_FILE%.txt}_summary.properties}" @@ -89,7 +89,6 @@ declare -ar REQ_CONTAINER_SIZES_G4=( "hub_uploadcache=512 512 512 1024 1536 2048 2048" "hub_webapp=3584 4048 5120 6144 20480 20480 20480" "hub_webserver=512 512 512 1024 2048 2048 2048" - "hub_webui=512 512 512 1024 1536 1536 1536" ) declare -ar REQ_CONTAINER_SIZES_G3=( # "SERVICE=10sph 120sph 250sph 500sph 1000sph 1500sph 2000sph" @@ -114,7 +113,6 @@ declare -ar REQ_CONTAINER_SIZES_G3=( "hub_storage=1024 1024 1024 1024 1024 1024 1024" "hub_webapp=3584 5120 8192 11264 15360 18432 18432" "hub_webserver=512 512 512 1024 2048 3072 3072" - "hub_webui=512 512 512 1024 1536 2048 2048" ) declare -ar REQ_CONTAINER_SIZES_G2=( # "SERVICE=compose swarm kubernetes" @@ -178,7 +176,7 @@ declare -ar SPH_MEM_SIZES_G4=( "hub_redis=900 900 1844 3687 4608 7373 9216" "hub_registration=922 1200 1200 1844 2765 2765 2765" "hub_scan=4608 9216 9216 9216 13824 13824 13824" - "hub_storage=512 2304 2765 3687 7373 7373 9100" + "hub_storage=512 1536 1996 3072 6554 6554 8192" "hub_webapp=3226 3608 4608 5530 18432 18432 18432" ) declare -ar SPH_MEM_SIZES_G3=( @@ -210,7 +208,6 @@ declare -ar TS_MEM_SIZES_G2=( 
"hub_scan=2048 2048 8192" # sic "hub_webapp=2048 4096 8192" "hub_webserver=512 2048 2048" - "hub_webui=640 640 1024" ) declare -ar TS_MEM_SIZES_G1=( # "SERVICE=small medium large" # in MB @@ -317,7 +314,6 @@ declare -ar REPLICABLE=( "hub_storage=$FAIL" "hub_webapp=$FAIL" "hub_webserver=$WARN" - #"hub_webui=$PASS" ) readonly MB=1048576 @@ -337,13 +333,13 @@ USE_NETWORK_TESTS="$TRUE" readonly NETWORK_TESTS_SKIPPED="*** Network Tests Skipped at command line ***" # Hostnames Black Duck uses within the docker network -readonly HUB_RESERVED_HOSTNAMES="postgres postgres-upgrader postgres-waiter authentication webapp webui scan jobrunner cfssl logstash \ +readonly HUB_RESERVED_HOSTNAMES="postgres postgres-upgrader postgres-waiter authentication webapp scan jobrunner cfssl logstash \ registration webserver documentation redis bomengine rabbitmq matchengine integration" readonly CONTAINERS_WITHOUT_CURL="nginx|postgres|postgres-upgrader|postgres-waiter|alert-database|cadvisor" # Versioned (not "1.0.x") blackducksoftware images -readonly VERSIONED_HUB_IMAGES="blackduck-authentication|blackduck-bomengine|blackduck-documentation|blackduck-jobrunner|blackduck-matchengine|blackduck-redis|blackduck-registration|blackduck-scan|blackduck-storage|blackduck-webapp|blackduck-webui" +readonly VERSIONED_HUB_IMAGES="blackduck-authentication|blackduck-bomengine|blackduck-documentation|blackduck-jobrunner|blackduck-matchengine|blackduck-redis|blackduck-registration|blackduck-scan|blackduck-storage|blackduck-webapp" readonly VERSIONED_BDBA_IMAGES="bdba-worker" readonly VERSIONED_ALERT_IMAGES="blackduck-alert" @@ -2510,7 +2506,7 @@ _get_container_size_info() { case "$hub_service" in (hub_redis*) if [[ "$hub_service" == hub_redissentinel* ]]; then memvar="container_memory"; else memvar="BLACKDUCK_REDIS_MAXMEMORY"; fi;; - (hub_postgres* | hub_cfssl | hub_rabbitmq | hub_webserver | hub_webui) + (hub_postgres* | hub_cfssl | hub_rabbitmq | hub_webserver) memvar="container_memory";; (*) memvar="HUB_MAX_MEMORY";; @@ -2580,8 +2576,6 @@ _get_container_size_info() { service="hub_storage";; (blackducksoftware/blackduck-webapp*) service="hub_webapp";; - (blackducksoftware/blackduck-webui*) - service="hub_webui"; memvar="container_memory";; (blackducksoftware/blackduck-nginx*) service="hub_webserver"; memvar="container_memory";; (blackducksoftware/blackduck-alert*) diff --git a/docker-swarm/blackduck-config.env b/docker-swarm/blackduck-config.env index c6d6383..af79ad7 100644 --- a/docker-swarm/blackduck-config.env +++ b/docker-swarm/blackduck-config.env @@ -24,7 +24,7 @@ BLACKDUCK_CORS_ALLOW_CREDENTIALS_PROP_NAME= # Do not change HUB_PRODUCT_NAME=BLACK_DUCK -HUB_VERSION=2024.1.1 +HUB_VERSION=2024.4.0 # Specify any property-specific overrides here # @@ -117,6 +117,9 @@ BLACKDUCK_DETECT_MAJOR_VERSION_MIN=8 # pass-thru ev for toggling JSON structured logging STRUCTURED_LOGGING +# Enable/disable the alerting of certificate expiration +BLACKDUCK_CERT_EXPIRATION_ALERT_ENABLED=true + # Enable/disable secrets encryption. Once enabled, it can never be disabled. Use docker-compose.encryption.yml to # specify the root, backup and previous seeds, once secrets encryption is enabled. 
SYNOPSYS_CRYPTO_ENABLED=false diff --git a/docker-swarm/docker-compose.bdba.yml b/docker-swarm/docker-compose.bdba.yml index 928ab6a..380ad92 100644 --- a/docker-swarm/docker-compose.bdba.yml +++ b/docker-swarm/docker-compose.bdba.yml @@ -5,7 +5,7 @@ version: '3.6' services: binaryscanner: - image: sigsynopsys/bdba-worker:2023.12.3 + image: sigsynopsys/bdba-worker:2024.3.0 env_file: [hub-bdba.env] entrypoint: /docker-entrypoint.sh healthcheck: diff --git a/docker-swarm/docker-compose.dbmigrate.yml b/docker-swarm/docker-compose.dbmigrate.yml index 8562c09..bed3051 100644 --- a/docker-swarm/docker-compose.dbmigrate.yml +++ b/docker-swarm/docker-compose.dbmigrate.yml @@ -1,7 +1,7 @@ version: '3.6' services: cfssl: - image: blackducksoftware/blackduck-cfssl:1.0.25 + image: blackducksoftware/blackduck-cfssl:1.0.26 volumes: - cert-volume:/etc/cfssl healthcheck: @@ -12,7 +12,7 @@ services: user: 'cfssl:root' logstash: - image: blackducksoftware/blackduck-logstash:1.0.35 + image: blackducksoftware/blackduck-logstash:1.0.36 volumes: - log-volume:/var/lib/logstash/data healthcheck: @@ -23,7 +23,7 @@ services: user: 'logstash:root' postgres: - image: blackducksoftware/blackduck-postgres:14-1.21 + image: blackducksoftware/blackduck-postgres:14-1.22 volumes: - postgres96-data-volume:/bitnami/postgresql - postgres-conf-volume:/opt/bitnami/postgresql/conf diff --git a/docker-swarm/docker-compose.externaldb.ubi.yml b/docker-swarm/docker-compose.externaldb.ubi.yml index d824ef3..3cf8fae 100644 --- a/docker-swarm/docker-compose.externaldb.ubi.yml +++ b/docker-swarm/docker-compose.externaldb.ubi.yml @@ -13,7 +13,7 @@ x-long-start-period: &long-start-period services: authentication: user: authentication:root - image: blackducksoftware/blackduck-authentication:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-authentication:2024.4.0_ubi9.3 volumes: - authentication-volume:/opt/blackduck/hub/hub-authentication/ldap - {type: tmpfs, target: /opt/blackduck/hub/hub-authentication/security} @@ -35,7 +35,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} webapp: user: webapp:root - image: blackducksoftware/blackduck-webapp:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-webapp:2024.4.0_ubi9.3 volumes: - log-volume:/opt/blackduck/hub/logs - {type: tmpfs, target: /opt/blackduck/hub/hub-webapp/security} @@ -57,7 +57,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} scan: user: scan:root - image: blackducksoftware/blackduck-scan:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-scan:2024.4.0_ubi9.3 env_file: [blackduck-config.env , hub-postgres.env] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -78,7 +78,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} storage: user: storage:root - image: blackducksoftware/blackduck-storage:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-storage:2024.4.0_ubi9.3 env_file: [blackduck-config.env , hub-postgres.env] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -100,7 +100,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} jobrunner: user: jobrunner:root - image: blackducksoftware/blackduck-jobrunner:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-jobrunner:2024.4.0_ubi9.3 env_file: [blackduck-config.env , hub-postgres.env] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 
'https://localhost:8443/health-checks/liveness', @@ -124,7 +124,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} cfssl: - image: blackducksoftware/blackduck-cfssl:1.0.25_ubi9.3 + image: blackducksoftware/blackduck-cfssl:1.0.26_ubi9.3 volumes: ['cert-volume:/etc/cfssl'] env_file: [blackduck-config.env] healthcheck: @@ -137,7 +137,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} logstash: - image: blackducksoftware/blackduck-logstash:1.0.35_ubi9.3 + image: blackducksoftware/blackduck-logstash:1.0.36_ubi9.3 volumes: ['log-volume:/var/lib/logstash/data'] env_file: [blackduck-config.env] healthcheck: @@ -152,7 +152,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} registration: - image: blackducksoftware/blackduck-registration:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-registration:2024.4.0_ubi9.3 volumes: - config-volume:/opt/blackduck/hub/hub-registration/config - {type: tmpfs, target: /opt/blackduck/hub/hub-registration/security} @@ -173,7 +173,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} webserver: - image: blackducksoftware/blackduck-nginx:2.0.66_ubi9.3 + image: blackducksoftware/blackduck-nginx:2024.4.0_ubi9.3 ports: ['443:8443'] env_file: [hub-webserver.env, blackduck-config.env] environment: @@ -190,23 +190,9 @@ services: deploy: mode: replicated restart_policy: {condition: on-failure, delay: 15s, window: 60s} - webui: - image: blackducksoftware/blackduck-webui:2024.1.1_ubi9.3 - healthcheck: - test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/health-checks/liveness', - /opt/blackduck/hub/hub-ui/security/root.crt] - interval: 30s - timeout: 10s - retries: 5 - user: webui:root - volumes: - - {type: tmpfs, target: /opt/blackduck/hub/hub-ui/security} - deploy: - mode: replicated - restart_policy: {condition: on-failure, delay: 15s, window: 60s} documentation: - image: blackducksoftware/blackduck-documentation:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-documentation:2024.4.0_ubi9.3 env_file: [blackduck-config.env] user: documentation:root environment: @@ -223,7 +209,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} redis: - image: blackducksoftware/blackduck-redis:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-redis:2024.4.0_ubi9.3 env_file: [blackduck-config.env] environment: HUB_JOBRUNNER_HOST: 'tasks.jobrunner.' @@ -242,7 +228,7 @@ services: deploy: restart_policy: {condition: any} bomengine: - image: blackducksoftware/blackduck-bomengine:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-bomengine:2024.4.0_ubi9.3 env_file: [blackduck-config.env , hub-postgres.env] environment: << : *pg-usage-settings @@ -263,7 +249,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} matchengine: - image: blackducksoftware/blackduck-matchengine:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-matchengine:2024.4.0_ubi9.3 user: matchengine:root healthcheck: test: [ CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -284,7 +270,7 @@ services: HUB_JOBRUNNER_HOST: 'tasks.jobrunner.' 
SYNOPSYS_CRYPTO_PROFILE: 'SWARM' rabbitmq: - image: blackducksoftware/rabbitmq:1.2.36_ubi9.3 + image: blackducksoftware/rabbitmq:1.2.37_ubi9.3 hostname: rabbitmq volumes: - rabbitmq-data-volume:/var/lib/rabbitmq diff --git a/docker-swarm/docker-compose.externaldb.yml b/docker-swarm/docker-compose.externaldb.yml index 6276259..2696740 100644 --- a/docker-swarm/docker-compose.externaldb.yml +++ b/docker-swarm/docker-compose.externaldb.yml @@ -13,7 +13,7 @@ x-long-start-period: &long-start-period services: authentication: user: authentication:root - image: blackducksoftware/blackduck-authentication:2024.1.1 + image: blackducksoftware/blackduck-authentication:2024.4.0 volumes: - authentication-volume:/opt/blackduck/hub/hub-authentication/ldap - {type: tmpfs, target: /opt/blackduck/hub/hub-authentication/security} @@ -35,7 +35,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} webapp: user: webapp:root - image: blackducksoftware/blackduck-webapp:2024.1.1 + image: blackducksoftware/blackduck-webapp:2024.4.0 volumes: - log-volume:/opt/blackduck/hub/logs - {type: tmpfs, target: /opt/blackduck/hub/hub-webapp/security} @@ -57,7 +57,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} scan: user: scan:root - image: blackducksoftware/blackduck-scan:2024.1.1 + image: blackducksoftware/blackduck-scan:2024.4.0 env_file: [blackduck-config.env , hub-postgres.env] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -78,7 +78,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} storage: user: storage:root - image: blackducksoftware/blackduck-storage:2024.1.1 + image: blackducksoftware/blackduck-storage:2024.4.0 env_file: [blackduck-config.env , hub-postgres.env] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -100,7 +100,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} jobrunner: user: jobrunner:root - image: blackducksoftware/blackduck-jobrunner:2024.1.1 + image: blackducksoftware/blackduck-jobrunner:2024.4.0 env_file: [blackduck-config.env , hub-postgres.env] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/health-checks/liveness', @@ -124,7 +124,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} cfssl: - image: blackducksoftware/blackduck-cfssl:1.0.25 + image: blackducksoftware/blackduck-cfssl:1.0.26 volumes: ['cert-volume:/etc/cfssl'] env_file: [blackduck-config.env] healthcheck: @@ -137,7 +137,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} logstash: - image: blackducksoftware/blackduck-logstash:1.0.35 + image: blackducksoftware/blackduck-logstash:1.0.36 volumes: ['log-volume:/var/lib/logstash/data'] env_file: [blackduck-config.env] healthcheck: @@ -152,7 +152,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} registration: - image: blackducksoftware/blackduck-registration:2024.1.1 + image: blackducksoftware/blackduck-registration:2024.4.0 volumes: - config-volume:/opt/blackduck/hub/hub-registration/config - {type: tmpfs, target: /opt/blackduck/hub/hub-registration/security} @@ -173,7 +173,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} webserver: - image: blackducksoftware/blackduck-nginx:2.0.66 + image: 
blackducksoftware/blackduck-nginx:2024.4.0 ports: ['443:8443'] env_file: [hub-webserver.env, blackduck-config.env] environment: @@ -190,23 +190,9 @@ services: deploy: mode: replicated restart_policy: {condition: on-failure, delay: 15s, window: 60s} - webui: - image: blackducksoftware/blackduck-webui:2024.1.1 - healthcheck: - test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/health-checks/liveness', - /opt/blackduck/hub/hub-ui/security/root.crt] - interval: 30s - timeout: 10s - retries: 5 - user: webui:root - volumes: - - {type: tmpfs, target: /opt/blackduck/hub/hub-ui/security} - deploy: - mode: replicated - restart_policy: {condition: on-failure, delay: 15s, window: 60s} documentation: - image: blackducksoftware/blackduck-documentation:2024.1.1 + image: blackducksoftware/blackduck-documentation:2024.4.0 env_file: [blackduck-config.env] user: documentation:root environment: @@ -223,7 +209,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} redis: - image: blackducksoftware/blackduck-redis:2024.1.1 + image: blackducksoftware/blackduck-redis:2024.4.0 env_file: [blackduck-config.env] environment: HUB_JOBRUNNER_HOST: 'tasks.jobrunner.' @@ -242,7 +228,7 @@ services: deploy: restart_policy: {condition: any} bomengine: - image: blackducksoftware/blackduck-bomengine:2024.1.1 + image: blackducksoftware/blackduck-bomengine:2024.4.0 env_file: [blackduck-config.env , hub-postgres.env] environment: << : *pg-usage-settings @@ -263,7 +249,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} matchengine: - image: blackducksoftware/blackduck-matchengine:2024.1.1 + image: blackducksoftware/blackduck-matchengine:2024.4.0 user: matchengine:root healthcheck: test: [ CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -284,7 +270,7 @@ services: HUB_JOBRUNNER_HOST: 'tasks.jobrunner.' 
SYNOPSYS_CRYPTO_PROFILE: 'SWARM' rabbitmq: - image: blackducksoftware/rabbitmq:1.2.36 + image: blackducksoftware/rabbitmq:1.2.37 hostname: rabbitmq volumes: - rabbitmq-data-volume:/var/lib/rabbitmq diff --git a/docker-swarm/docker-compose.integration.externaldb.ubi.yml b/docker-swarm/docker-compose.integration.externaldb.ubi.yml index 4b8d76d..656b3d2 100644 --- a/docker-swarm/docker-compose.integration.externaldb.ubi.yml +++ b/docker-swarm/docker-compose.integration.externaldb.ubi.yml @@ -5,7 +5,7 @@ version: '3.6' services: integration: - image: blackducksoftware/blackduck-integration:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-integration:2024.4.0_ubi9.3 user: integration:root env_file: [blackduck-config.env , hub-postgres.env] healthcheck: diff --git a/docker-swarm/docker-compose.integration.externaldb.yml b/docker-swarm/docker-compose.integration.externaldb.yml index ad0bb64..bdfb3f8 100644 --- a/docker-swarm/docker-compose.integration.externaldb.yml +++ b/docker-swarm/docker-compose.integration.externaldb.yml @@ -5,7 +5,7 @@ version: '3.6' services: integration: - image: blackducksoftware/blackduck-integration:2024.1.1 + image: blackducksoftware/blackduck-integration:2024.4.0 user: integration:root env_file: [blackduck-config.env , hub-postgres.env] healthcheck: diff --git a/docker-swarm/docker-compose.integration.yml b/docker-swarm/docker-compose.integration.yml index bd0778a..6cce622 100644 --- a/docker-swarm/docker-compose.integration.yml +++ b/docker-swarm/docker-compose.integration.yml @@ -5,7 +5,7 @@ version: '3.6' services: integration: - image: blackducksoftware/blackduck-integration:2024.1.1 + image: blackducksoftware/blackduck-integration:2024.4.0 user: integration:root env_file: [blackduck-config.env ] healthcheck: diff --git a/docker-swarm/docker-compose.readonly.yml b/docker-swarm/docker-compose.readonly.yml index 71222cf..b255255 100644 --- a/docker-swarm/docker-compose.readonly.yml +++ b/docker-swarm/docker-compose.readonly.yml @@ -75,11 +75,6 @@ services: - /var/run - /opt/blackduck/hub/webserver/filebeat - /opt/blackduck/hub/webserver/logrotate - webui: - read_only: true - volumes: - - /opt/blackduck/hub/hub-ui/logs - - /opt/blackduck/hub/filebeat documentation: read_only: true volumes: diff --git a/docker-swarm/docker-compose.redis.sentinel.yml b/docker-swarm/docker-compose.redis.sentinel.yml index b9da831..eb8a08d 100644 --- a/docker-swarm/docker-compose.redis.sentinel.yml +++ b/docker-swarm/docker-compose.redis.sentinel.yml @@ -1,7 +1,7 @@ version: '3.6' services: redisslave: - image: blackducksoftware/blackduck-redis:2024.1.1 + image: blackducksoftware/blackduck-redis:2024.4.0 env_file: [blackduck-config.env] user: redis:root stop_grace_period: 60s @@ -25,7 +25,7 @@ services: limits: {memory: 2048M} reservations: {memory: 2048M} redissentinel1: - image: blackducksoftware/blackduck-redis:2024.1.1 + image: blackducksoftware/blackduck-redis:2024.4.0 env_file: [blackduck-config.env] hostname: redissentinel1 user: redis:root @@ -47,7 +47,7 @@ services: limits: {memory: 32M} reservations: {memory: 32M} redissentinel2: - image: blackducksoftware/blackduck-redis:2024.1.1 + image: blackducksoftware/blackduck-redis:2024.4.0 env_file: [blackduck-config.env] hostname: redissentinel2 user: redis:root @@ -69,7 +69,7 @@ services: limits: {memory: 32M} reservations: {memory: 32M} redissentinel3: - image: blackducksoftware/blackduck-redis:2024.1.1 + image: blackducksoftware/blackduck-redis:2024.4.0 env_file: [blackduck-config.env] hostname: redissentinel3 user: 
redis:root diff --git a/docker-swarm/docker-compose.rl.yml b/docker-swarm/docker-compose.rl.yml new file mode 100644 index 0000000..74aafab --- /dev/null +++ b/docker-swarm/docker-compose.rl.yml @@ -0,0 +1,24 @@ +version: '3.6' +# Starting with the 2019.2.0 release: DO NOT EDIT THIS FILE!!! +# ADD ANY OF YOUR OVERRIDES IN THE docker-compose.local-overrides.yml FILE +# Refer to the Release Notes or Installation Guide for more information. +services: + rlservice: + image: blackducksoftware/rl-service:2024.4.0 + user: rlservice:root + links: [cfssl, rabbitmq, logstash, webserver, storage] + env_file: [hub-rl.env, blackduck-config.env] + entrypoint: /usr/local/bin/docker-entrypoint.sh + healthcheck: + test: curl --fail http://localhost:8080/rlservice/health || exit 1 + interval: 30s + timeout: 60s + retries: 15 + start_period: 7200s + deploy: + mode: replicated + replicas: 1 + restart_policy: {condition: on-failure, delay: 5s, window: 60s} + resources: + limits: {memory: 6144M, cpus: '2'} + reservations: {memory: 6144M, cpus: '2'} diff --git a/docker-swarm/docker-compose.ubi.yml b/docker-swarm/docker-compose.ubi.yml index e0b90ff..2812ade 100644 --- a/docker-swarm/docker-compose.ubi.yml +++ b/docker-swarm/docker-compose.ubi.yml @@ -12,7 +12,7 @@ x-long-start-period: &long-start-period services: postgres: - image: blackducksoftware/blackduck-postgres:14-1.21_ubi9.3 + image: blackducksoftware/blackduck-postgres:14-1.22_ubi9.3 ports: ['55436:5432'] volumes: - postgres96-data-volume:/bitnami/postgresql @@ -45,7 +45,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} authentication: user: authentication:root - image: blackducksoftware/blackduck-authentication:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-authentication:2024.4.0_ubi9.3 volumes: - authentication-volume:/opt/blackduck/hub/hub-authentication/ldap - {type: tmpfs, target: /opt/blackduck/hub/hub-authentication/security} @@ -67,7 +67,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} webapp: user: webapp:root - image: blackducksoftware/blackduck-webapp:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-webapp:2024.4.0_ubi9.3 volumes: - log-volume:/opt/blackduck/hub/logs - {type: tmpfs, target: /opt/blackduck/hub/hub-webapp/security} @@ -89,7 +89,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} scan: user: scan:root - image: blackducksoftware/blackduck-scan:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-scan:2024.4.0_ubi9.3 env_file: [blackduck-config.env ] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -110,7 +110,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} storage: user: storage:root - image: blackducksoftware/blackduck-storage:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-storage:2024.4.0_ubi9.3 env_file: [blackduck-config.env ] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -132,7 +132,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} jobrunner: user: jobrunner:root - image: blackducksoftware/blackduck-jobrunner:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-jobrunner:2024.4.0_ubi9.3 env_file: [blackduck-config.env ] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/health-checks/liveness', @@ -156,7 +156,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 
5s, window: 60s} cfssl: - image: blackducksoftware/blackduck-cfssl:1.0.25_ubi9.3 + image: blackducksoftware/blackduck-cfssl:1.0.26_ubi9.3 volumes: ['cert-volume:/etc/cfssl'] env_file: [blackduck-config.env] healthcheck: @@ -169,7 +169,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} logstash: - image: blackducksoftware/blackduck-logstash:1.0.35_ubi9.3 + image: blackducksoftware/blackduck-logstash:1.0.36_ubi9.3 volumes: ['log-volume:/var/lib/logstash/data'] env_file: [blackduck-config.env] healthcheck: @@ -184,7 +184,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} registration: - image: blackducksoftware/blackduck-registration:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-registration:2024.4.0_ubi9.3 volumes: - config-volume:/opt/blackduck/hub/hub-registration/config - {type: tmpfs, target: /opt/blackduck/hub/hub-registration/security} @@ -205,7 +205,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} webserver: - image: blackducksoftware/blackduck-nginx:2.0.66_ubi9.3 + image: blackducksoftware/blackduck-nginx:2024.4.0_ubi9.3 ports: ['443:8443'] env_file: [hub-webserver.env, blackduck-config.env] environment: @@ -222,23 +222,9 @@ services: deploy: mode: replicated restart_policy: {condition: on-failure, delay: 15s, window: 60s} - webui: - image: blackducksoftware/blackduck-webui:2024.1.1_ubi9.3 - healthcheck: - test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/health-checks/liveness', - /opt/blackduck/hub/hub-ui/security/root.crt] - interval: 30s - timeout: 10s - retries: 5 - user: webui:root - volumes: - - {type: tmpfs, target: /opt/blackduck/hub/hub-ui/security} - deploy: - mode: replicated - restart_policy: {condition: on-failure, delay: 15s, window: 60s} documentation: - image: blackducksoftware/blackduck-documentation:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-documentation:2024.4.0_ubi9.3 env_file: [blackduck-config.env] user: documentation:root environment: @@ -255,7 +241,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} redis: - image: blackducksoftware/blackduck-redis:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-redis:2024.4.0_ubi9.3 env_file: [blackduck-config.env] environment: HUB_JOBRUNNER_HOST: 'tasks.jobrunner.' @@ -274,7 +260,7 @@ services: deploy: restart_policy: {condition: any} bomengine: - image: blackducksoftware/blackduck-bomengine:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-bomengine:2024.4.0_ubi9.3 env_file: [blackduck-config.env ] environment: << : *pg-usage-settings @@ -295,7 +281,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} matchengine: - image: blackducksoftware/blackduck-matchengine:2024.1.1_ubi9.3 + image: blackducksoftware/blackduck-matchengine:2024.4.0_ubi9.3 user: matchengine:root healthcheck: test: [ CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -316,7 +302,7 @@ services: HUB_JOBRUNNER_HOST: 'tasks.jobrunner.' 
SYNOPSYS_CRYPTO_PROFILE: 'SWARM' rabbitmq: - image: blackducksoftware/rabbitmq:1.2.36_ubi9.3 + image: blackducksoftware/rabbitmq:1.2.37_ubi9.3 hostname: rabbitmq volumes: - rabbitmq-data-volume:/var/lib/rabbitmq diff --git a/docker-swarm/docker-compose.yml b/docker-swarm/docker-compose.yml index e3e02a0..239c750 100644 --- a/docker-swarm/docker-compose.yml +++ b/docker-swarm/docker-compose.yml @@ -12,7 +12,7 @@ x-long-start-period: &long-start-period services: postgres: - image: blackducksoftware/blackduck-postgres:14-1.21 + image: blackducksoftware/blackduck-postgres:14-1.22 ports: ['55436:5432'] volumes: - postgres96-data-volume:/bitnami/postgresql @@ -45,7 +45,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} authentication: user: authentication:root - image: blackducksoftware/blackduck-authentication:2024.1.1 + image: blackducksoftware/blackduck-authentication:2024.4.0 volumes: - authentication-volume:/opt/blackduck/hub/hub-authentication/ldap - {type: tmpfs, target: /opt/blackduck/hub/hub-authentication/security} @@ -67,7 +67,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} webapp: user: webapp:root - image: blackducksoftware/blackduck-webapp:2024.1.1 + image: blackducksoftware/blackduck-webapp:2024.4.0 volumes: - log-volume:/opt/blackduck/hub/logs - {type: tmpfs, target: /opt/blackduck/hub/hub-webapp/security} @@ -89,7 +89,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} scan: user: scan:root - image: blackducksoftware/blackduck-scan:2024.1.1 + image: blackducksoftware/blackduck-scan:2024.4.0 env_file: [blackduck-config.env ] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -110,7 +110,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} storage: user: storage:root - image: blackducksoftware/blackduck-storage:2024.1.1 + image: blackducksoftware/blackduck-storage:2024.4.0 env_file: [blackduck-config.env ] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -132,7 +132,7 @@ services: restart_policy: {condition: on-failure, delay: 5s, window: 60s} jobrunner: user: jobrunner:root - image: blackducksoftware/blackduck-jobrunner:2024.1.1 + image: blackducksoftware/blackduck-jobrunner:2024.4.0 env_file: [blackduck-config.env ] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/health-checks/liveness', @@ -156,7 +156,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} cfssl: - image: blackducksoftware/blackduck-cfssl:1.0.25 + image: blackducksoftware/blackduck-cfssl:1.0.26 volumes: ['cert-volume:/etc/cfssl'] env_file: [blackduck-config.env] healthcheck: @@ -169,7 +169,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} logstash: - image: blackducksoftware/blackduck-logstash:1.0.35 + image: blackducksoftware/blackduck-logstash:1.0.36 volumes: ['log-volume:/var/lib/logstash/data'] env_file: [blackduck-config.env] healthcheck: @@ -184,7 +184,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} registration: - image: blackducksoftware/blackduck-registration:2024.1.1 + image: blackducksoftware/blackduck-registration:2024.4.0 volumes: - config-volume:/opt/blackduck/hub/hub-registration/config - {type: tmpfs, target: /opt/blackduck/hub/hub-registration/security} @@ -205,7 +205,7 @@ 
services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} webserver: - image: blackducksoftware/blackduck-nginx:2.0.66 + image: blackducksoftware/blackduck-nginx:2024.4.0 ports: ['443:8443'] env_file: [hub-webserver.env, blackduck-config.env] environment: @@ -222,23 +222,9 @@ services: deploy: mode: replicated restart_policy: {condition: on-failure, delay: 15s, window: 60s} - webui: - image: blackducksoftware/blackduck-webui:2024.1.1 - healthcheck: - test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/health-checks/liveness', - /opt/blackduck/hub/hub-ui/security/root.crt] - interval: 30s - timeout: 10s - retries: 5 - user: webui:root - volumes: - - {type: tmpfs, target: /opt/blackduck/hub/hub-ui/security} - deploy: - mode: replicated - restart_policy: {condition: on-failure, delay: 15s, window: 60s} documentation: - image: blackducksoftware/blackduck-documentation:2024.1.1 + image: blackducksoftware/blackduck-documentation:2024.4.0 env_file: [blackduck-config.env] user: documentation:root environment: @@ -255,7 +241,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} redis: - image: blackducksoftware/blackduck-redis:2024.1.1 + image: blackducksoftware/blackduck-redis:2024.4.0 env_file: [blackduck-config.env] environment: HUB_JOBRUNNER_HOST: 'tasks.jobrunner.' @@ -274,7 +260,7 @@ services: deploy: restart_policy: {condition: any} bomengine: - image: blackducksoftware/blackduck-bomengine:2024.1.1 + image: blackducksoftware/blackduck-bomengine:2024.4.0 env_file: [blackduck-config.env ] environment: << : *pg-usage-settings @@ -295,7 +281,7 @@ services: mode: replicated restart_policy: {condition: on-failure, delay: 5s, window: 60s} matchengine: - image: blackducksoftware/blackduck-matchengine:2024.1.1 + image: blackducksoftware/blackduck-matchengine:2024.4.0 user: matchengine:root healthcheck: test: [ CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/api/health-checks/liveness', @@ -316,7 +302,7 @@ services: HUB_JOBRUNNER_HOST: 'tasks.jobrunner.' 
SYNOPSYS_CRYPTO_PROFILE: 'SWARM' rabbitmq: - image: blackducksoftware/rabbitmq:1.2.36 + image: blackducksoftware/rabbitmq:1.2.37 hostname: rabbitmq volumes: - rabbitmq-data-volume:/var/lib/rabbitmq diff --git a/docker-swarm/hub-rl.env b/docker-swarm/hub-rl.env new file mode 100644 index 0000000..f817154 --- /dev/null +++ b/docker-swarm/hub-rl.env @@ -0,0 +1,15 @@ +# specify where RabbitMQ is at +RABBIT_MQ_HOST=rabbitmq +RABBIT_MQ_PORT=5671 +RABBITMQ_CTL_ERL_ARGS=-proto_dist inet_tls + +# used by worker and rabbitmq +BROKER_URL=amqps://rabbitmq/blackduck +BROKER_USE_SSL=yes + +# CFSSL config +CFSSL=cfssl:8888 +HUB_LOGSTASH_HOST=logstash +HTTPS_VERIFY_CERTS=yes +RABBITMQ_DEFAULT_VHOST=blackduck +CLIENT_CERT_CN=rlservice diff --git a/docker-swarm/sizes-gen02/resources.yaml b/docker-swarm/sizes-gen02/resources.yaml index c5f64f2..6b94452 100644 --- a/docker-swarm/sizes-gen02/resources.yaml +++ b/docker-swarm/sizes-gen02/resources.yaml @@ -115,17 +115,6 @@ services: memory: 512M replicas: 1 - webui: - deploy: - resources: - limits: - cpus: "1" - memory: 640M - reservations: - cpus: "0.5" - memory: 640M - replicas: 1 - documentation: deploy: resources: diff --git a/docker-swarm/sizes-gen03/1000sph.yaml b/docker-swarm/sizes-gen03/1000sph.yaml index b6b7f4e..1caf868 100644 --- a/docker-swarm/sizes-gen03/1000sph.yaml +++ b/docker-swarm/sizes-gen03/1000sph.yaml @@ -190,13 +190,3 @@ services: cpus: ".400" memory: "1024M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".600" - memory: "1536M" - reservations: - cpus: ".300" - memory: "1024M" - replicas: 1 diff --git a/docker-swarm/sizes-gen03/10sph.yaml b/docker-swarm/sizes-gen03/10sph.yaml index 3370ecf..d054f23 100644 --- a/docker-swarm/sizes-gen03/10sph.yaml +++ b/docker-swarm/sizes-gen03/10sph.yaml @@ -189,13 +189,3 @@ services: cpus: ".200" memory: "512M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".300" - memory: "512M" - reservations: - cpus: ".200" - memory: "512M" - replicas: 1 diff --git a/docker-swarm/sizes-gen03/120sph.yaml b/docker-swarm/sizes-gen03/120sph.yaml index c73495f..1d8c6fe 100644 --- a/docker-swarm/sizes-gen03/120sph.yaml +++ b/docker-swarm/sizes-gen03/120sph.yaml @@ -189,13 +189,3 @@ services: cpus: ".200" memory: "512M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".300" - memory: "512M" - reservations: - cpus: ".200" - memory: "512M" - replicas: 1 diff --git a/docker-swarm/sizes-gen03/1500sph.yaml b/docker-swarm/sizes-gen03/1500sph.yaml index 4627f01..96fcc65 100644 --- a/docker-swarm/sizes-gen03/1500sph.yaml +++ b/docker-swarm/sizes-gen03/1500sph.yaml @@ -190,13 +190,3 @@ services: cpus: ".400" memory: "1024M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: "1.000" - memory: "2048M" - reservations: - cpus: ".300" - memory: "1024M" - replicas: 1 diff --git a/docker-swarm/sizes-gen03/2000sph.yaml b/docker-swarm/sizes-gen03/2000sph.yaml index 3dea169..27c9b9f 100644 --- a/docker-swarm/sizes-gen03/2000sph.yaml +++ b/docker-swarm/sizes-gen03/2000sph.yaml @@ -190,13 +190,3 @@ services: cpus: ".400" memory: "1024M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: "1.000" - memory: "2048M" - reservations: - cpus: ".300" - memory: "1024M" - replicas: 1 diff --git a/docker-swarm/sizes-gen03/250sph.yaml b/docker-swarm/sizes-gen03/250sph.yaml index b656759..7f30dbc 100644 --- a/docker-swarm/sizes-gen03/250sph.yaml +++ b/docker-swarm/sizes-gen03/250sph.yaml @@ -189,13 +189,3 @@ services: cpus: ".200" memory: "512M" replicas: 1 - webui: - deploy: - resources: - 
limits: - cpus: ".300" - memory: "512M" - reservations: - cpus: ".200" - memory: "512M" - replicas: 1 diff --git a/docker-swarm/sizes-gen03/500sph.yaml b/docker-swarm/sizes-gen03/500sph.yaml index 53e3198..0bf9cf5 100644 --- a/docker-swarm/sizes-gen03/500sph.yaml +++ b/docker-swarm/sizes-gen03/500sph.yaml @@ -189,13 +189,3 @@ services: cpus: ".300" memory: "512M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".400" - memory: "1024M" - reservations: - cpus: ".300" - memory: "512M" - replicas: 1 diff --git a/docker-swarm/sizes-gen04/1000sph.yaml b/docker-swarm/sizes-gen04/1000sph.yaml index 57a8bda..73cf76b 100644 --- a/docker-swarm/sizes-gen04/1000sph.yaml +++ b/docker-swarm/sizes-gen04/1000sph.yaml @@ -157,7 +157,7 @@ services: replicas: 10 storage: environment: - HUB_MAX_MEMORY: "7373m" + HUB_MAX_MEMORY: "6554m" deploy: resources: limits: @@ -189,13 +189,3 @@ services: cpus: ".400" memory: "1024M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".600" - memory: "1536M" - reservations: - cpus: ".300" - memory: "1024M" - replicas: 1 diff --git a/docker-swarm/sizes-gen04/10sph.yaml b/docker-swarm/sizes-gen04/10sph.yaml index 9888796..a568b7e 100644 --- a/docker-swarm/sizes-gen04/10sph.yaml +++ b/docker-swarm/sizes-gen04/10sph.yaml @@ -189,13 +189,3 @@ services: cpus: ".200" memory: "512M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".300" - memory: "512M" - reservations: - cpus: ".200" - memory: "512M" - replicas: 1 diff --git a/docker-swarm/sizes-gen04/120sph.yaml b/docker-swarm/sizes-gen04/120sph.yaml index 92a2275..7071340 100644 --- a/docker-swarm/sizes-gen04/120sph.yaml +++ b/docker-swarm/sizes-gen04/120sph.yaml @@ -157,7 +157,7 @@ services: replicas: 1 storage: environment: - HUB_MAX_MEMORY: "2304m" + HUB_MAX_MEMORY: "1536m" deploy: resources: limits: @@ -189,13 +189,3 @@ services: cpus: ".200" memory: "512M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".300" - memory: "512M" - reservations: - cpus: ".200" - memory: "512M" - replicas: 1 diff --git a/docker-swarm/sizes-gen04/1500sph.yaml b/docker-swarm/sizes-gen04/1500sph.yaml index 1cb7afa..6855531 100644 --- a/docker-swarm/sizes-gen04/1500sph.yaml +++ b/docker-swarm/sizes-gen04/1500sph.yaml @@ -157,7 +157,7 @@ services: replicas: 13 storage: environment: - HUB_MAX_MEMORY: "7373m" + HUB_MAX_MEMORY: "6554m" deploy: resources: limits: @@ -189,13 +189,3 @@ services: cpus: ".400" memory: "1024M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".600" - memory: "1536M" - reservations: - cpus: ".300" - memory: "1024M" - replicas: 1 diff --git a/docker-swarm/sizes-gen04/2000sph.yaml b/docker-swarm/sizes-gen04/2000sph.yaml index 6ad8151..fc1260a 100644 --- a/docker-swarm/sizes-gen04/2000sph.yaml +++ b/docker-swarm/sizes-gen04/2000sph.yaml @@ -157,7 +157,7 @@ services: replicas: 16 storage: environment: - HUB_MAX_MEMORY: "9100m" + HUB_MAX_MEMORY: "8192m" deploy: resources: limits: @@ -189,13 +189,3 @@ services: cpus: ".400" memory: "1024M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".600" - memory: "1536M" - reservations: - cpus: ".300" - memory: "1024M" - replicas: 1 diff --git a/docker-swarm/sizes-gen04/250sph.yaml b/docker-swarm/sizes-gen04/250sph.yaml index 551e3a5..81cf973 100644 --- a/docker-swarm/sizes-gen04/250sph.yaml +++ b/docker-swarm/sizes-gen04/250sph.yaml @@ -157,7 +157,7 @@ services: replicas: 2 storage: environment: - HUB_MAX_MEMORY: "2765m" + HUB_MAX_MEMORY: "1996m" deploy: resources: limits: @@ -189,13 +189,3 @@ services: cpus: 
".200" memory: "512M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".300" - memory: "512M" - reservations: - cpus: ".200" - memory: "512M" - replicas: 1 diff --git a/docker-swarm/sizes-gen04/500sph.yaml b/docker-swarm/sizes-gen04/500sph.yaml index 38991be..7bf10ee 100644 --- a/docker-swarm/sizes-gen04/500sph.yaml +++ b/docker-swarm/sizes-gen04/500sph.yaml @@ -157,7 +157,7 @@ services: replicas: 4 storage: environment: - HUB_MAX_MEMORY: "3687m" + HUB_MAX_MEMORY: "3072m" deploy: resources: limits: @@ -189,13 +189,3 @@ services: cpus: ".300" memory: "512M" replicas: 1 - webui: - deploy: - resources: - limits: - cpus: ".400" - memory: "1024M" - reservations: - cpus: ".300" - memory: "512M" - replicas: 1 diff --git a/docs/en_US/getting_started.pdf b/docs/en_US/getting_started.pdf index d5077f9..dbc5e64 100644 Binary files a/docs/en_US/getting_started.pdf and b/docs/en_US/getting_started.pdf differ diff --git a/docs/en_US/install_kubernetes.pdf b/docs/en_US/install_kubernetes.pdf index 1261062..c8be00f 100644 Binary files a/docs/en_US/install_kubernetes.pdf and b/docs/en_US/install_kubernetes.pdf differ diff --git a/docs/en_US/install_openshift.pdf b/docs/en_US/install_openshift.pdf index a3e80a0..db22238 100644 Binary files a/docs/en_US/install_openshift.pdf and b/docs/en_US/install_openshift.pdf differ diff --git a/docs/en_US/install_swarm.pdf b/docs/en_US/install_swarm.pdf index 1f4b3cf..d68642b 100644 Binary files a/docs/en_US/install_swarm.pdf and b/docs/en_US/install_swarm.pdf differ diff --git a/docs/en_US/release_notes.pdf b/docs/en_US/release_notes.pdf index 0f084eb..396ece8 100644 Binary files a/docs/en_US/release_notes.pdf and b/docs/en_US/release_notes.pdf differ diff --git a/docs/ja_JA/getting_started.pdf b/docs/ja_JA/getting_started.pdf index afd86bf..aff3b0f 100644 Binary files a/docs/ja_JA/getting_started.pdf and b/docs/ja_JA/getting_started.pdf differ diff --git a/docs/ja_JA/install_kubernetes.pdf b/docs/ja_JA/install_kubernetes.pdf index 88999ec..f6a8b21 100644 Binary files a/docs/ja_JA/install_kubernetes.pdf and b/docs/ja_JA/install_kubernetes.pdf differ diff --git a/docs/ja_JA/install_openshift.pdf b/docs/ja_JA/install_openshift.pdf index 9c7291b..d042c3a 100644 Binary files a/docs/ja_JA/install_openshift.pdf and b/docs/ja_JA/install_openshift.pdf differ diff --git a/docs/ja_JA/install_swarm.pdf b/docs/ja_JA/install_swarm.pdf index 981436f..05ad2e0 100644 Binary files a/docs/ja_JA/install_swarm.pdf and b/docs/ja_JA/install_swarm.pdf differ diff --git a/docs/ja_JA/release_notes.pdf b/docs/ja_JA/release_notes.pdf index 162cc73..9d045f6 100644 Binary files a/docs/ja_JA/release_notes.pdf and b/docs/ja_JA/release_notes.pdf differ diff --git a/docs/zh_CN/getting_started.pdf b/docs/zh_CN/getting_started.pdf index d64b0e2..4f99740 100644 Binary files a/docs/zh_CN/getting_started.pdf and b/docs/zh_CN/getting_started.pdf differ diff --git a/docs/zh_CN/install_kubernetes.pdf b/docs/zh_CN/install_kubernetes.pdf index d5a90ad..57ee535 100644 Binary files a/docs/zh_CN/install_kubernetes.pdf and b/docs/zh_CN/install_kubernetes.pdf differ diff --git a/docs/zh_CN/install_openshift.pdf b/docs/zh_CN/install_openshift.pdf index 8d87134..3d281e9 100644 Binary files a/docs/zh_CN/install_openshift.pdf and b/docs/zh_CN/install_openshift.pdf differ diff --git a/docs/zh_CN/install_swarm.pdf b/docs/zh_CN/install_swarm.pdf index 9edc3ea..0e5ef5b 100644 Binary files a/docs/zh_CN/install_swarm.pdf and b/docs/zh_CN/install_swarm.pdf differ diff --git a/docs/zh_CN/release_notes.pdf 
b/docs/zh_CN/release_notes.pdf index b0859bb..97aee37 100644 Binary files a/docs/zh_CN/release_notes.pdf and b/docs/zh_CN/release_notes.pdf differ diff --git a/kubernetes/blackduck/Chart.yaml b/kubernetes/blackduck/Chart.yaml index ef8d369..40e3375 100644 --- a/kubernetes/blackduck/Chart.yaml +++ b/kubernetes/blackduck/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: 2024.1.1 +appVersion: 2024.4.0 name: blackduck description: Black Duck Helm Chart -version: 2024.1.1 +version: 2024.4.0 diff --git a/kubernetes/blackduck/README.md b/kubernetes/blackduck/README.md index 61e2e20..6ac6aa3 100644 --- a/kubernetes/blackduck/README.md +++ b/kubernetes/blackduck/README.md @@ -8,11 +8,11 @@ This chart bootstraps **Black Duck** deployment on a **Kubernetes** cluster usin * storageClass configured that allows persistent volumes. * Helm3 * Add the Synopsys repository to Helm repository + ```bash $ helm repo add synopsys https://sig-repo.synopsys.com/artifactory/sig-cloudnative ``` - ## Quick Start Parameters * `name` @@ -211,11 +211,6 @@ To update the deployment: $ helm upgrade ${BD_NAME} synopsys/blackduck -f my-values.yaml --reuse-values --namespace ${BD_NAME} ``` -## Additional documentation - -Please refer to the Synopsysctl document for some common configurations -https://synopsys.atlassian.net/wiki/spaces/BDLM/pages/65700255/Black+Duck+Configuration - ## Configuration The following table lists the configurable parameters of the Black Duck chart and their default values. @@ -247,7 +242,7 @@ The following table lists the configurable parameters of the Black Duck chart an | Parameter | Description | Default | |----------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `registry` | Image repository | `docker.io/blackducksoftware` | -| `imageTag` | Version of Black Duck | `2024.1.1` | +| `imageTag` | Version of Black Duck | `2024.4.0` | | `imagePullSecrets` | Reference to one or more secrets to be used when pulling images | `[]` | | `tlsCertSecretName` | Name of Webserver TLS Secret containing Certificates (if not provided Certificates will be generated) | | | `exposeui` | Enable Black Duck Web Server User Interface (UI) | `true` | @@ -352,7 +347,7 @@ The following table lists the configurable parameters of the Black Duck chart an | Parameter | Description | Default | |-------------------------------------------|----------------------------------------------------|--------------------------| | `binaryscanner.registry` | Image repository to be override at container level | `docker.io/sigsynopsys` | -| `binaryscanner.imageTag` | Image tag to be override at container level | `2023.12.3` | +| `binaryscanner.imageTag` | Image tag to be override at container level | `2024.3.0` | | `binaryscanner.resources.limits.Cpu` | Binary Scanner container CPU Limit | `1000m` | | `binaryscanner.resources.requests.Cpu` | Binary Scanner container CPU request | `1000m` | | `binaryscanner.resources.limits.memory` | Binary Scanner container Memory Limit | `2048Mi` | @@ -368,7 +363,7 @@ The following table lists the configurable parameters of the Black Duck chart an | Parameter | Description | Default | 
|-----------------------------------|----------------------------------------------------------|------------------| | `cfssl.registry` | Image repository to be override at container level | | -| `cfssl.imageTag` | Image tag to be override at container level | `1.0.25` | +| `cfssl.imageTag` | Image tag to be override at container level | `1.0.26` | | `cfssl.resources.limits.memory` | Cfssl container Memory Limit | `640Mi` | | `cfssl.resources.requests.memory` | Cfssl container Memory request | `640Mi` | | `cfssl.persistentVolumeClaimName` | Point to an existing Cfssl Persistent Volume Claim (PVC) | | @@ -429,7 +424,7 @@ The following table lists the configurable parameters of the Black Duck chart an | Parameter | Description | Default | |--------------------------------------|----------------------------------------------------|---------------------| | `rabbitmq.registry` | Image repository to be override at container level | | -| `rabbitmq.imageTag` | Image tag to be override at container level | `1.2.36` | +| `rabbitmq.imageTag` | Image tag to be override at container level | `1.2.37` | | `rabbitmq.resources.limits.memory` | RabbitMQ container Memory Limit | `1024Mi` | | `rabbitmq.resources.requests.memory` | RabbitMQ container Memory request | `1024Mi` | | `rabbitmq.nodeSelector` | RabbitMQ node labels for pod assignment | `{}` | @@ -568,7 +563,7 @@ storage: | Parameter | Description | Default | |--------------------------------------|-------------------------------------------------------------|---------------------| | `logstash.registry` | Image repository to be override at container level | | -| `logstash.imageTag` | Image tag to be override at container level | `1.0.35` | +| `logstash.imageTag` | Image tag to be override at container level | `1.0.36` | | `logstash.resources.limits.memory` | Logstash container Memory Limit | `1024Mi` | | `logstash.resources.requests.memory` | Logstash container Memory request | `1024Mi` | | `logstash.persistentVolumeClaimName` | Point to an existing Logstash Persistent Volume Claim (PVC) | | @@ -585,7 +580,7 @@ storage: | Parameter | Description | Default | |---------------------------------------|----------------------------------------------------|------------------| | `webserver.registry` | Image repository to be override at container level | | -| `webserver.imageTag` | Image tag to be override at container level | `2.0.66` | +| `webserver.imageTag` | Image tag to be override at container level | `2024.4.0` | | `webserver.resources.limits.memory` | Webserver container Memory Limit | `512Mi` | | `webserver.resources.requests.memory` | Webserver container Memory request | `512Mi` | | `webserver.nodeSelector` | Webserver node labels for pod assignment | `{}` | @@ -617,7 +612,7 @@ storage: |---------------------------|----------------------------------------------------------------------------|--------------------| | `datadog.enable` | only true for hosted customers (Values.enableInitContainer should be true) | false | | `datadog.registry` | Image repository to be override at container level | | -| `datadog.imageTag` | Image tag to be override at container level | `1.0.12` | +| `datadog.imageTag` | Image tag to be override at container level | `1.0.14` | | `datadog.imagePullPolicy` | Image pull policy | IfNotPresent | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
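Since the tables above list imageTag as a per-container override, a small values file passed with -f (a my-values.yaml, named here purely for illustration) could pin the tags shown in this diff. The selection of containers to pin is illustrative; the chart-level imageTag already defaults to 2024.4.0.

```
# Hypothetical my-values.yaml fragment pinning the image tags listed in the
# parameter tables above. File name and choice of keys are illustrative only.
imageTag: "2024.4.0"
cfssl:
  imageTag: "1.0.26"
logstash:
  imageTag: "1.0.36"
rabbitmq:
  imageTag: "1.2.37"
webserver:
  imageTag: "2024.4.0"
```

Such a file would then be applied with the helm install/upgrade commands shown earlier in this README (for example, `helm upgrade ${BD_NAME} synopsys/blackduck -f my-values.yaml --reuse-values --namespace ${BD_NAME}`).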
@@ -625,7 +620,7 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, ```console -$ helm install . --name ${BD_NAME} --namespace ${BD_NAME} -f .yaml --set tlsCertSecretName=${BD_NAME}-blackduck-webserver-certificate -f values.yaml +$ helm install . --name ${BD_NAME} --namespace ${BD_NAME} --set tlsCertSecretName=${BD_NAME}-blackduck-webserver-certificate -f values.yaml -f .yaml ``` > **Tip**: You can use the default [values.yaml](values.yaml) diff --git a/kubernetes/blackduck/sizes-gen02/large.yaml b/kubernetes/blackduck/sizes-gen02/large.yaml index 6697e45..a94dc5f 100644 --- a/kubernetes/blackduck/sizes-gen02/large.yaml +++ b/kubernetes/blackduck/sizes-gen02/large.yaml @@ -57,14 +57,6 @@ webserver: limits: memory: "2048Mi" -webui: - resources: - requests: - cpu: "500m" - limits: - cpu: "1000m" - memory: "1024Mi" - bomengine: replicas: 4 hubMaxMemory: "12288m" diff --git a/kubernetes/blackduck/sizes-gen02/medium.yaml b/kubernetes/blackduck/sizes-gen02/medium.yaml index 45ffd19..1281e40 100644 --- a/kubernetes/blackduck/sizes-gen02/medium.yaml +++ b/kubernetes/blackduck/sizes-gen02/medium.yaml @@ -57,14 +57,6 @@ webserver: limits: memory: "2048Mi" -webui: - resources: - requests: - cpu: "500m" - limits: - cpu: "1000m" - memory: "640Mi" - bomengine: replicas: 2 hubMaxMemory: "6144m" diff --git a/kubernetes/blackduck/sizes-gen02/small.yaml b/kubernetes/blackduck/sizes-gen02/small.yaml index 2897c77..e274841 100644 --- a/kubernetes/blackduck/sizes-gen02/small.yaml +++ b/kubernetes/blackduck/sizes-gen02/small.yaml @@ -57,14 +57,6 @@ webserver: limits: memory: "512Mi" -webui: - resources: - requests: - cpu: "500m" - limits: - cpu: "1000m" - memory: "640Mi" - bomengine: replicas: 1 hubMaxMemory: "4096m" diff --git a/kubernetes/blackduck/sizes-gen02/x-large.yaml b/kubernetes/blackduck/sizes-gen02/x-large.yaml index 837e9b2..74dc46e 100644 --- a/kubernetes/blackduck/sizes-gen02/x-large.yaml +++ b/kubernetes/blackduck/sizes-gen02/x-large.yaml @@ -57,14 +57,6 @@ webserver: limits: memory: "2048Mi" -webui: - resources: - requests: - cpu: "500m" - limits: - cpu: "1000m" - memory: "1024Mi" - bomengine: replicas: 6 hubMaxMemory: "12288m" diff --git a/kubernetes/blackduck/sizes-gen03/1000sph.yaml b/kubernetes/blackduck/sizes-gen03/1000sph.yaml index ce0d9e8..9f65abf 100644 --- a/kubernetes/blackduck/sizes-gen03/1000sph.yaml +++ b/kubernetes/blackduck/sizes-gen03/1000sph.yaml @@ -187,13 +187,3 @@ webserver: requests: cpu: "400m" memory: "1024Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "600m" - memory: "1536Mi" - requests: - cpu: "300m" - memory: "1024Mi" diff --git a/kubernetes/blackduck/sizes-gen03/10sph.yaml b/kubernetes/blackduck/sizes-gen03/10sph.yaml index 2a79f98..bb60e7f 100644 --- a/kubernetes/blackduck/sizes-gen03/10sph.yaml +++ b/kubernetes/blackduck/sizes-gen03/10sph.yaml @@ -186,13 +186,3 @@ webserver: requests: cpu: "200m" memory: "512Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "300m" - memory: "512Mi" - requests: - cpu: "200m" - memory: "512Mi" diff --git a/kubernetes/blackduck/sizes-gen03/120sph.yaml b/kubernetes/blackduck/sizes-gen03/120sph.yaml index be64993..65bb2ed 100644 --- a/kubernetes/blackduck/sizes-gen03/120sph.yaml +++ b/kubernetes/blackduck/sizes-gen03/120sph.yaml @@ -186,13 +186,3 @@ webserver: requests: cpu: "200m" memory: "512Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: 
"300m" - memory: "512Mi" - requests: - cpu: "200m" - memory: "512Mi" diff --git a/kubernetes/blackduck/sizes-gen03/1500sph.yaml b/kubernetes/blackduck/sizes-gen03/1500sph.yaml index 3ff8b81..2749138 100644 --- a/kubernetes/blackduck/sizes-gen03/1500sph.yaml +++ b/kubernetes/blackduck/sizes-gen03/1500sph.yaml @@ -187,13 +187,3 @@ webserver: requests: cpu: "400m" memory: "1024Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "1000m" - memory: "2048Mi" - requests: - cpu: "300m" - memory: "1024Mi" diff --git a/kubernetes/blackduck/sizes-gen03/2000sph.yaml b/kubernetes/blackduck/sizes-gen03/2000sph.yaml index 1d7bf67..0a02357 100644 --- a/kubernetes/blackduck/sizes-gen03/2000sph.yaml +++ b/kubernetes/blackduck/sizes-gen03/2000sph.yaml @@ -187,13 +187,3 @@ webserver: requests: cpu: "400m" memory: "1024Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "1000m" - memory: "2048Mi" - requests: - cpu: "300m" - memory: "1024Mi" diff --git a/kubernetes/blackduck/sizes-gen03/250sph.yaml b/kubernetes/blackduck/sizes-gen03/250sph.yaml index a24c174..c0a32d6 100644 --- a/kubernetes/blackduck/sizes-gen03/250sph.yaml +++ b/kubernetes/blackduck/sizes-gen03/250sph.yaml @@ -186,13 +186,3 @@ webserver: requests: cpu: "200m" memory: "512Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "300m" - memory: "512Mi" - requests: - cpu: "200m" - memory: "512Mi" diff --git a/kubernetes/blackduck/sizes-gen03/500sph.yaml b/kubernetes/blackduck/sizes-gen03/500sph.yaml index 41341d6..ccb0b03 100644 --- a/kubernetes/blackduck/sizes-gen03/500sph.yaml +++ b/kubernetes/blackduck/sizes-gen03/500sph.yaml @@ -186,13 +186,3 @@ webserver: requests: cpu: "300m" memory: "512Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "400m" - memory: "1024Mi" - requests: - cpu: "300m" - memory: "512Mi" diff --git a/kubernetes/blackduck/sizes-gen04/1000sph.yaml b/kubernetes/blackduck/sizes-gen04/1000sph.yaml index 2a3540e..5bdd49a 100644 --- a/kubernetes/blackduck/sizes-gen04/1000sph.yaml +++ b/kubernetes/blackduck/sizes-gen04/1000sph.yaml @@ -11,7 +11,7 @@ environs: authentication: replicas: 1 - hubMaxMemory: "1844m" + maxRamPercentage: 90 resources: limits: cpu: "700m" @@ -22,7 +22,7 @@ authentication: bomengine: replicas: 7 - hubMaxMemory: "4608m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -43,7 +43,7 @@ cfssl: documentation: replicas: 1 - hubMaxMemory: "1383m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -54,7 +54,7 @@ documentation: jobrunner: replicas: 5 - hubMaxMemory: "7373m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -65,7 +65,7 @@ jobrunner: logstash: replicas: 1 - hubMaxMemory: "3687m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -76,7 +76,7 @@ logstash: matchengine: replicas: 9 - hubMaxMemory: "9216m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -135,7 +135,7 @@ redis: registration: replicas: 1 - hubMaxMemory: "2765m" + maxRamPercentage: 90 resources: limits: cpu: "700m" @@ -146,7 +146,7 @@ registration: scan: replicas: 10 - hubMaxMemory: "13824m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -157,7 +157,7 @@ scan: storage: replicas: 1 - hubMaxMemory: "7373m" + maxRamPercentage: 80 resources: limits: cpu: "1000m" @@ -168,7 +168,7 @@ storage: webapp: replicas: 1 - hubMaxMemory: "18432m" + maxRamPercentage: 90 dbPoolMaxActive: 64 resources: limits: @@ -187,13 +187,3 @@ webserver: requests: cpu: "400m" memory: "1024Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "600m" - memory: "1536Mi" - requests: - cpu: "300m" - 
memory: "1024Mi" diff --git a/kubernetes/blackduck/sizes-gen04/10sph.yaml b/kubernetes/blackduck/sizes-gen04/10sph.yaml index a01f723..b76f2b4 100644 --- a/kubernetes/blackduck/sizes-gen04/10sph.yaml +++ b/kubernetes/blackduck/sizes-gen04/10sph.yaml @@ -11,7 +11,7 @@ environs: authentication: replicas: 1 - hubMaxMemory: "1106m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -22,7 +22,7 @@ authentication: bomengine: replicas: 1 - hubMaxMemory: "4148m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -43,7 +43,7 @@ cfssl: documentation: replicas: 1 - hubMaxMemory: "922m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -54,7 +54,7 @@ documentation: jobrunner: replicas: 1 - hubMaxMemory: "4240m" + maxRamPercentage: 90 resources: limits: cpu: "1500m" @@ -65,7 +65,7 @@ jobrunner: logstash: replicas: 1 - hubMaxMemory: "1106m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -76,7 +76,7 @@ logstash: matchengine: replicas: 1 - hubMaxMemory: "4608m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -135,7 +135,7 @@ redis: registration: replicas: 1 - hubMaxMemory: "922m" + maxRamPercentage: 90 resources: limits: cpu: "300m" @@ -146,7 +146,7 @@ registration: scan: replicas: 1 - hubMaxMemory: "4608m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -157,7 +157,7 @@ scan: storage: replicas: 1 - hubMaxMemory: "1024m" + maxRamPercentage: 50 resources: limits: cpu: "1000m" @@ -168,7 +168,7 @@ storage: webapp: replicas: 1 - hubMaxMemory: "3226m" + maxRamPercentage: 90 resources: limits: cpu: "2000m" @@ -186,13 +186,3 @@ webserver: requests: cpu: "200m" memory: "512Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "300m" - memory: "512Mi" - requests: - cpu: "200m" - memory: "512Mi" diff --git a/kubernetes/blackduck/sizes-gen04/120sph.yaml b/kubernetes/blackduck/sizes-gen04/120sph.yaml index ac063f7..84920fd 100644 --- a/kubernetes/blackduck/sizes-gen04/120sph.yaml +++ b/kubernetes/blackduck/sizes-gen04/120sph.yaml @@ -11,7 +11,7 @@ environs: authentication: replicas: 1 - hubMaxMemory: "1843m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -22,7 +22,7 @@ authentication: bomengine: replicas: 1 - hubMaxMemory: "5000m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -43,7 +43,7 @@ cfssl: documentation: replicas: 1 - hubMaxMemory: "922m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -54,7 +54,7 @@ documentation: jobrunner: replicas: 1 - hubMaxMemory: "7373m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -65,7 +65,7 @@ jobrunner: logstash: replicas: 1 - hubMaxMemory: "2185m" + maxRamPercentage: 90 resources: limits: cpu: "800m" @@ -76,7 +76,7 @@ logstash: matchengine: replicas: 2 - hubMaxMemory: "7373m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -135,7 +135,7 @@ redis: registration: replicas: 1 - hubMaxMemory: "1200m" + maxRamPercentage: 90 resources: limits: cpu: "300m" @@ -146,7 +146,7 @@ registration: scan: replicas: 1 - hubMaxMemory: "9216m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -157,7 +157,7 @@ scan: storage: replicas: 1 - hubMaxMemory: "2304m" + maxRamPercentage: 60 resources: limits: cpu: "1000m" @@ -168,7 +168,7 @@ storage: webapp: replicas: 1 - hubMaxMemory: "3608m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -186,13 +186,3 @@ webserver: requests: cpu: "200m" memory: "512Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "300m" - memory: "512Mi" - requests: - cpu: "200m" - memory: "512Mi" diff --git a/kubernetes/blackduck/sizes-gen04/1500sph.yaml 
b/kubernetes/blackduck/sizes-gen04/1500sph.yaml index 0afc6af..08426df 100644 --- a/kubernetes/blackduck/sizes-gen04/1500sph.yaml +++ b/kubernetes/blackduck/sizes-gen04/1500sph.yaml @@ -11,7 +11,7 @@ environs: authentication: replicas: 1 - hubMaxMemory: "1844m" + maxRamPercentage: 90 resources: limits: cpu: "700m" @@ -22,7 +22,7 @@ authentication: bomengine: replicas: 8 - hubMaxMemory: "4608m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -43,7 +43,7 @@ cfssl: documentation: replicas: 1 - hubMaxMemory: "1383m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -54,7 +54,7 @@ documentation: jobrunner: replicas: 6 - hubMaxMemory: "7373m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -65,7 +65,7 @@ jobrunner: logstash: replicas: 1 - hubMaxMemory: "3687m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -76,7 +76,7 @@ logstash: matchengine: replicas: 12 - hubMaxMemory: "9216m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -135,7 +135,7 @@ redis: registration: replicas: 1 - hubMaxMemory: "2765m" + maxRamPercentage: 90 resources: limits: cpu: "700m" @@ -146,7 +146,7 @@ registration: scan: replicas: 13 - hubMaxMemory: "13824m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -157,7 +157,7 @@ scan: storage: replicas: 1 - hubMaxMemory: "7373m" + maxRamPercentage: 80 resources: limits: cpu: "1000m" @@ -168,7 +168,7 @@ storage: webapp: replicas: 1 - hubMaxMemory: "18432m" + maxRamPercentage: 90 dbPoolMaxActive: 64 resources: limits: @@ -187,13 +187,3 @@ webserver: requests: cpu: "400m" memory: "1024Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "600m" - memory: "1536Mi" - requests: - cpu: "300m" - memory: "1024Mi" diff --git a/kubernetes/blackduck/sizes-gen04/2000sph.yaml b/kubernetes/blackduck/sizes-gen04/2000sph.yaml index c976866..37450bf 100644 --- a/kubernetes/blackduck/sizes-gen04/2000sph.yaml +++ b/kubernetes/blackduck/sizes-gen04/2000sph.yaml @@ -11,7 +11,7 @@ environs: authentication: replicas: 1 - hubMaxMemory: "2765m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -22,7 +22,7 @@ authentication: bomengine: replicas: 10 - hubMaxMemory: "4608m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -43,7 +43,7 @@ cfssl: documentation: replicas: 1 - hubMaxMemory: "1383m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -54,7 +54,7 @@ documentation: jobrunner: replicas: 8 - hubMaxMemory: "7373m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -65,7 +65,7 @@ jobrunner: logstash: replicas: 1 - hubMaxMemory: "3687m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -76,7 +76,7 @@ logstash: matchengine: replicas: 15 - hubMaxMemory: "9216m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -135,7 +135,7 @@ redis: registration: replicas: 1 - hubMaxMemory: "2765m" + maxRamPercentage: 90 resources: limits: cpu: "700m" @@ -146,7 +146,7 @@ registration: scan: replicas: 16 - hubMaxMemory: "13824m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -157,7 +157,7 @@ scan: storage: replicas: 1 - hubMaxMemory: "9100m" + maxRamPercentage: 80 resources: limits: cpu: "1000m" @@ -168,7 +168,7 @@ storage: webapp: replicas: 1 - hubMaxMemory: "18432m" + maxRamPercentage: 90 dbPoolMaxActive: 64 resources: limits: @@ -187,13 +187,3 @@ webserver: requests: cpu: "400m" memory: "1024Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "600m" - memory: "1536Mi" - requests: - cpu: "300m" - memory: "1024Mi" diff --git a/kubernetes/blackduck/sizes-gen04/250sph.yaml 
b/kubernetes/blackduck/sizes-gen04/250sph.yaml index a48dd92..a21eb6d 100644 --- a/kubernetes/blackduck/sizes-gen04/250sph.yaml +++ b/kubernetes/blackduck/sizes-gen04/250sph.yaml @@ -11,7 +11,7 @@ environs: authentication: replicas: 1 - hubMaxMemory: "1843m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -22,7 +22,7 @@ authentication: bomengine: replicas: 1 - hubMaxMemory: "5000m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -43,7 +43,7 @@ cfssl: documentation: replicas: 1 - hubMaxMemory: "922m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -54,7 +54,7 @@ documentation: jobrunner: replicas: 2 - hubMaxMemory: "7373m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -65,7 +65,7 @@ jobrunner: logstash: replicas: 1 - hubMaxMemory: "2765m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -76,7 +76,7 @@ logstash: matchengine: replicas: 3 - hubMaxMemory: "7373m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -135,7 +135,7 @@ redis: registration: replicas: 1 - hubMaxMemory: "1200m" + maxRamPercentage: 90 resources: limits: cpu: "300m" @@ -146,7 +146,7 @@ registration: scan: replicas: 2 - hubMaxMemory: "9216m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -157,7 +157,7 @@ scan: storage: replicas: 1 - hubMaxMemory: "2765m" + maxRamPercentage: 65 resources: limits: cpu: "1000m" @@ -168,7 +168,7 @@ storage: webapp: replicas: 1 - hubMaxMemory: "4608m" + maxRamPercentage: 90 resources: limits: cpu: "2000m" @@ -186,13 +186,3 @@ webserver: requests: cpu: "200m" memory: "512Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "300m" - memory: "512Mi" - requests: - cpu: "200m" - memory: "512Mi" diff --git a/kubernetes/blackduck/sizes-gen04/500sph.yaml b/kubernetes/blackduck/sizes-gen04/500sph.yaml index 3007367..386b1a1 100644 --- a/kubernetes/blackduck/sizes-gen04/500sph.yaml +++ b/kubernetes/blackduck/sizes-gen04/500sph.yaml @@ -11,7 +11,7 @@ environs: authentication: replicas: 1 - hubMaxMemory: "1843m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -22,7 +22,7 @@ authentication: bomengine: replicas: 2 - hubMaxMemory: "4608m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -43,7 +43,7 @@ cfssl: documentation: replicas: 1 - hubMaxMemory: "922m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -54,7 +54,7 @@ documentation: jobrunner: replicas: 3 - hubMaxMemory: "7373m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -65,7 +65,7 @@ jobrunner: logstash: replicas: 1 - hubMaxMemory: "2765m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -76,7 +76,7 @@ logstash: matchengine: replicas: 4 - hubMaxMemory: "7373m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -135,7 +135,7 @@ redis: registration: replicas: 1 - hubMaxMemory: "1844m" + maxRamPercentage: 90 resources: limits: cpu: "500m" @@ -146,7 +146,7 @@ registration: scan: replicas: 4 - hubMaxMemory: "9216m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" @@ -157,7 +157,7 @@ scan: storage: replicas: 1 - hubMaxMemory: "3687m" + maxRamPercentage: 75 resources: limits: cpu: "1000m" @@ -168,7 +168,7 @@ storage: webapp: replicas: 1 - hubMaxMemory: "5530m" + maxRamPercentage: 90 resources: limits: cpu: "3000m" @@ -186,13 +186,3 @@ webserver: requests: cpu: "300m" memory: "512Mi" - -webui: - replicas: 1 - resources: - limits: - cpu: "400m" - memory: "1024Mi" - requests: - cpu: "300m" - memory: "512Mi" diff --git a/kubernetes/blackduck/templates/_helpers.tpl b/kubernetes/blackduck/templates/_helpers.tpl index c3b39ef..701b948 
100644 --- a/kubernetes/blackduck/templates/_helpers.tpl +++ b/kubernetes/blackduck/templates/_helpers.tpl @@ -52,7 +52,6 @@ HUB_SCAN_HOST: {{ .Release.Name }}-blackduck-scan HUB_VERSION: {{ .Values.imageTag }} HUB_WEBAPP_HOST: {{ .Release.Name }}-blackduck-webapp HUB_WEBSERVER_HOST: {{ .Release.Name }}-blackduck-webserver -HUB_WEBUI_HOST: {{ .Release.Name }}-blackduck-ui RABBIT_MQ_HOST: {{ .Release.Name }}-blackduck-rabbitmq {{- if eq .Values.isKubernetes true }} BLACKDUCK_ORCHESTRATION_TYPE: KUBERNETES @@ -306,3 +305,24 @@ imagePullPolicy: IfNotPresent path: backup/seed {{- end -}} {{- end -}} + +{{/* +# Derive a value for HUB_MAX_MEMORY from .resources.limits.memory. +# The scope is expected to be one of the services; e.g., .Values.jobrunner. +*/}} +{{- define "computeHubMaxMemory" }} +{{- if (ne (dig "resources" "limits" "memory" "none" .) "none") }} +{{- $rawMemLimit := .resources.limits.memory | replace "i" "" -}} +{{- $memoryUnit := regexFind "[gmGM]" $rawMemLimit | upper -}} +{{- $numericMemLimit := trimSuffix $memoryUnit $rawMemLimit -}} +{{- $memLimitInMB := (mul $numericMemLimit (ternary 1024 1 (eq $memoryUnit "G"))) -}} +{{- $rawRamPercentage := coalesce .maxRamPercentage $.maxRamPercentage 90 -}} +{{- $maxRamPercentage := divf $rawRamPercentage 100.0 -}} +{{- if (lt (mulf $memLimitInMB $maxRamPercentage) 256.0) }} +{{- $maxRamPercentage := divf (subf $memLimitInMB 256.0) $memLimitInMB -}} +{{- end }} +{{- cat (round (mulf $memLimitInMB $maxRamPercentage) 0) "m" | nospace -}} +{{- else }} +{{- .hubMaxMemory }} +{{- end -}} +{{- end -}} diff --git a/kubernetes/blackduck/templates/authentication.yaml b/kubernetes/blackduck/templates/authentication.yaml index 389d7e6..3091603 100644 --- a/kubernetes/blackduck/templates/authentication.yaml +++ b/kubernetes/blackduck/templates/authentication.yaml @@ -88,7 +88,7 @@ spec: containers: - env: - name: HUB_MAX_MEMORY - value: {{ .Values.authentication.hubMaxMemory }} + value: {{ include "computeHubMaxMemory" .Values.authentication }} {{- if .Values.authentication.hubMinMemory }} - name: HUB_MIN_MEMORY value: {{ .Values.authentication.hubMinMemory }} diff --git a/kubernetes/blackduck/templates/bomengine.yaml b/kubernetes/blackduck/templates/bomengine.yaml index 6eb425b..79d4f10 100644 --- a/kubernetes/blackduck/templates/bomengine.yaml +++ b/kubernetes/blackduck/templates/bomengine.yaml @@ -63,7 +63,7 @@ spec: containers: - env: - name: HUB_MAX_MEMORY - value: {{ .Values.bomengine.hubMaxMemory }} + value: {{ include "computeHubMaxMemory" .Values.bomengine }} {{- if .Values.bomengine.hubMinMemory }} - name: HUB_MIN_MEMORY value: {{ .Values.bomengine.hubMinMemory }} diff --git a/kubernetes/blackduck/templates/integration.yaml b/kubernetes/blackduck/templates/integration.yaml index b2946ba..1869af6 100644 --- a/kubernetes/blackduck/templates/integration.yaml +++ b/kubernetes/blackduck/templates/integration.yaml @@ -63,7 +63,7 @@ spec: containers: - env: - name: HUB_MAX_MEMORY - value: {{ .Values.integration.hubMaxMemory }} + value: {{ include "computeHubMaxMemory" .Values.integration }} {{- if .Values.integration.hubMinMemory }} - name: HUB_MIN_MEMORY value: {{ .Values.integration.hubMinMemory }} diff --git a/kubernetes/blackduck/templates/jobrunner.yaml b/kubernetes/blackduck/templates/jobrunner.yaml index b2a28c7..7716ff4 100644 --- a/kubernetes/blackduck/templates/jobrunner.yaml +++ b/kubernetes/blackduck/templates/jobrunner.yaml @@ -64,7 +64,7 @@ spec: containers: - env: - name: HUB_MAX_MEMORY - value: {{ .Values.jobrunner.hubMaxMemory }} 
+ value: {{ include "computeHubMaxMemory" .Values.jobrunner }} {{- if .Values.jobrunner.hubMinMemory }} - name: HUB_MIN_MEMORY value: {{ .Values.jobrunner.hubMinMemory }} diff --git a/kubernetes/blackduck/templates/matchengine.yaml b/kubernetes/blackduck/templates/matchengine.yaml index c615666..94586e1 100644 --- a/kubernetes/blackduck/templates/matchengine.yaml +++ b/kubernetes/blackduck/templates/matchengine.yaml @@ -63,7 +63,7 @@ spec: containers: - env: - name: HUB_MAX_MEMORY - value: {{ .Values.matchengine.hubMaxMemory }} + value: {{ include "computeHubMaxMemory" .Values.matchengine }} {{- if .Values.matchengine.hubMinMemory }} - name: HUB_MIN_MEMORY value: {{ .Values.matchengine.hubMinMemory }} diff --git a/kubernetes/blackduck/templates/registration.yaml b/kubernetes/blackduck/templates/registration.yaml index 0eaefac..ca53df4 100644 --- a/kubernetes/blackduck/templates/registration.yaml +++ b/kubernetes/blackduck/templates/registration.yaml @@ -88,7 +88,7 @@ spec: containers: - env: - name: HUB_MAX_MEMORY - value: {{ .Values.registration.hubMaxMemory }} + value: {{ include "computeHubMaxMemory" .Values.registration }} {{- if .Values.registration.hubMinMemory }} - name: HUB_MIN_MEMORY value: {{ .Values.registration.hubMinMemory }} diff --git a/kubernetes/blackduck/templates/rl.yaml b/kubernetes/blackduck/templates/rl.yaml new file mode 100644 index 0000000..555064f --- /dev/null +++ b/kubernetes/blackduck/templates/rl.yaml @@ -0,0 +1,183 @@ +{{- if .Values.rlservice.enabled }} +{{- $_ := set . "serviceName" "rlservice" }} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + {{- include "bd.labelsWithoutVersion" . | nindent 4 }} + component: rlservice + name: {{ .Release.Name }}-rlservice + namespace: {{ .Release.Namespace }} +spec: + ports: + - name: port-8443 + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + {{- include "bd.selectorLabelsWithoutVersion" . | nindent 4 }} + component: rlservice + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "bd.labels" . | nindent 4 }} + component: rlservice + name: {{ .Release.Name }}-rlservice + namespace: {{ .Release.Namespace }} +spec: + {{- if eq .Values.status "Running" }} + replicas: {{ .Values.rlservice.replicas }} + {{- else }} + replicas: 0 + {{- end }} + selector: + matchLabels: + {{- include "bd.selectorLabelsWithoutVersion" . | nindent 6 }} + component: rlservice + strategy: + type: Recreate + template: + metadata: + labels: + {{- include "bd.selectorLabels" . | nindent 8 }} + component: rlservice + annotations: + {{- include "bd.prometheus.common.annotations" . | nindent 8 }} + checksum/blackduck-config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/postgres-config: {{ include (print $.Template.BasePath "/postgres-config.yaml") . | sha256sum }} + {{- if .Values.rlservice.podAnnotations }} + {{- range $key, $value := .Values.rlservice.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + name: {{ .Release.Name }}-rlservice + spec: + {{- if .Values.enableInitContainer }} + initContainers: + {{- include "bd.datadog.java.initcontainer" . | indent 6 }} + {{- include "bd.postgresql.up.check.initcontainer" . 
| indent 6 }} + {{- end }} + containers: + - env: + - name: HUB_MAX_MEMORY + value: {{ .Values.rlservice.hubMaxMemory }} + {{- if .Values.rlservice.hubMinMemory }} + - name: HUB_MIN_MEMORY + value: {{ .Values.rlservice.hubMinMemory }} + {{- end }} + - name: blackduck.hub.pool.max.active + {{- if .Values.rlservice.dbPoolMaxActive }} + value: {{ quote .Values.rlservice.dbPoolMaxActive }} + {{- else }} + value: {{ quote .Values.dbPoolMaxActive }} + {{- end -}} + {{- include "bd.datadog.java.env" . | indent 8 }} + - name: DD_ENABLED + value: {{ .Values.datadog.enabled | quote }} + - name: HUB_MANAGEMENT_ENDPOINT_PROMETHEUS_ENABLED + value: {{ .Values.metrics.enabled | quote }} + envFrom: + - configMapRef: + name: {{ .Release.Name }}-blackduck-config + - configMapRef: + name: {{ .Release.Name }}-blackduck-db-config + {{- if .Values.rlservice.registry }} + {{- if .Values.rlservice.imageTag }} + image: {{ .Values.rlservice.registry }}/rl-service:{{ .Values.rlservice.imageTag }} + {{- else }} + image: {{ .Values.rlservice.registry }}/rl-service:{{ .Values.imageTag }} + {{- end}} + {{- else }} + {{- if .Values.rlservice.imageTag }} + image: {{ .Values.registry }}/rl-service:{{ .Values.rlservice.imageTag }} + {{- else }} + image: {{ .Values.registry }}/rl-service:{{ .Values.imageTag }} + {{- end}} + {{- end}} + {{- include "customImagePullPolicy" .Values.rlservice | nindent 8 }} + {{- include "rlserviceStartupProbe" . | nindent 8 }} + {{- include "rlserviceLivenessProbe" . | nindent 8 }} + name: rlservice + ports: + - containerPort: 8443 + protocol: TCP + resources: + {{- toYaml .Values.rlservice.resources | nindent 12 }} + {{- with .Values.rlservice.securityContext }} + securityContext: +{{ toYaml . | indent 10 }} + {{- end }} + volumeMounts: + {{- include "bd.ale.volumemounts" . | indent 8 }} + {{- include "bd.datadog.java.volumemount" . | indent 8 }} + {{- include "bd.postgresql.secrets.volumemounts" . | indent 8 }} + - mountPath: /opt/blackduck/hub/rlservice/security + name: dir-rlservice + {{- include "common.volume.mount" . | indent 8 }} + dnsPolicy: ClusterFirst + {{- include "bd.imagePullSecrets" . | nindent 6 }} + {{- with .Values.rlservice.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.rlservice.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.rlservice.tolerations }} + tolerations: +{{ toYaml . | indent 6 }} + {{- end }} + {{- if .Values.rlservice.podSecurityContext }} + {{- with .Values.rlservice.podSecurityContext }} + securityContext: +{{ toYaml . | indent 8 }} + {{- end }} + {{- else }} + {{- include "bd.podSecurityContext" . | nindent 6 }} + {{- end }} + serviceAccountName: {{ .Release.Name }}-blackduck-service-account + restartPolicy: Always + volumes: + {{- include "bd.datadog.java.volume" . | indent 6 }} + - emptyDir: {} + name: dir-rlservice + {{- include "bd.postgresql.secrets.volumes" . | indent 6 }} + {{- include "common.volumes" . | indent 6 }} + {{- if .Values.enableApplicationLevelEncryption }} + {{- include "bd.ale.volumes" . 
| indent 6 }} + {{- end }} +{{- end }} + +{{- define "rlserviceStartupProbe" -}} +{{- if .Values.enableLivenessProbe -}} +startupProbe: + httpGet: + path: /rlservice/health + port: 8080 +# Review with cloudops + failureThreshold: {{ mul .Values.maxWaitForSchemaUpdatesMinutes 2 }} + initialDelaySeconds: 240 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 +{{- end -}} +{{- end -}} + +{{- define "rlserviceLivenessProbe" -}} +{{- if .Values.enableLivenessProbe -}} +livenessProbe: + httpGet: + path: /rlservice/health + port: 8080 + failureThreshold: 10 + initialDelaySeconds: 30 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 10 +{{- end -}} +{{- end -}} diff --git a/kubernetes/blackduck/templates/scan.yaml b/kubernetes/blackduck/templates/scan.yaml index 72ef685..d5c0602 100644 --- a/kubernetes/blackduck/templates/scan.yaml +++ b/kubernetes/blackduck/templates/scan.yaml @@ -63,7 +63,7 @@ spec: containers: - env: - name: HUB_MAX_MEMORY - value: {{ .Values.scan.hubMaxMemory }} + value: {{ include "computeHubMaxMemory" .Values.scan }} {{- if .Values.scan.hubMinMemory }} - name: HUB_MIN_MEMORY value: {{ .Values.scan.hubMinMemory }} diff --git a/kubernetes/blackduck/templates/storage.yaml b/kubernetes/blackduck/templates/storage.yaml index 8152729..cf20b84 100644 --- a/kubernetes/blackduck/templates/storage.yaml +++ b/kubernetes/blackduck/templates/storage.yaml @@ -63,7 +63,7 @@ spec: containers: - env: - name: HUB_MAX_MEMORY - value: {{ .Values.storage.hubMaxMemory }} + value: {{ include "computeHubMaxMemory" .Values.storage }} {{- if .Values.storage.hubMinMemory }} - name: HUB_MIN_MEMORY value: {{ .Values.storage.hubMinMemory }} diff --git a/kubernetes/blackduck/templates/webapp-logstash.yaml b/kubernetes/blackduck/templates/webapp-logstash.yaml index 6092f56..b27d004 100644 --- a/kubernetes/blackduck/templates/webapp-logstash.yaml +++ b/kubernetes/blackduck/templates/webapp-logstash.yaml @@ -107,7 +107,7 @@ spec: containers: - env: - name: HUB_MAX_MEMORY - value: {{ .Values.webapp.hubMaxMemory }} + value: {{ include "computeHubMaxMemory" .Values.webapp }} {{- if .Values.webapp.hubMinMemory }} - name: HUB_MIN_MEMORY value: {{ .Values.webapp.hubMinMemory }} @@ -165,7 +165,7 @@ spec: {{- include "common.volume.mount" . | indent 8 }} - env: - name: HUB_MAX_MEMORY - value: {{ .Values.logstash.hubMaxMemory }} + value: {{ include "computeHubMaxMemory" .Values.logstash }} {{- if .Values.logstash.hubMinMemory }} - name: HUB_MIN_MEMORY value: {{ .Values.logstash.hubMinMemory }} diff --git a/kubernetes/blackduck/templates/webui.yaml b/kubernetes/blackduck/templates/webui.yaml deleted file mode 100644 index d9844a3..0000000 --- a/kubernetes/blackduck/templates/webui.yaml +++ /dev/null @@ -1,139 +0,0 @@ -{{- $_ := set . "serviceName" "blackduck-ui" }} ---- -apiVersion: v1 -kind: Service -metadata: - labels: - {{- include "bd.labelsWithoutVersion" . | nindent 4 }} - component: webui - name: {{ .Release.Name }}-blackduck-ui - namespace: {{ .Release.Namespace }} -spec: - ports: - - name: port-8443 - port: 8443 - protocol: TCP - targetPort: 8443 - selector: - {{- include "bd.selectorLabelsWithoutVersion" . | nindent 4 }} - component: webui - type: ClusterIP ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - {{- include "bd.labels" . 
| nindent 4 }} - component: webui - name: {{ .Release.Name }}-blackduck-ui - namespace: {{ .Release.Namespace }} -spec: - {{- if eq .Values.status "Running" }} - replicas: {{ .Values.webui.replicas }} - {{- else }} - replicas: 0 - {{- end }} - selector: - matchLabels: - {{- include "bd.selectorLabelsWithoutVersion" . | nindent 6 }} - component: webui - strategy: - type: Recreate - template: - metadata: - labels: - {{- include "bd.selectorLabels" . | nindent 8 }} - component: webui - annotations: - checksum/blackduck-config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - {{- if .Values.webui.podAnnotations }} - {{- range $key, $value := .Values.webui.podAnnotations }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- end }} - name: {{ .Release.Name }}-blackduck-ui - spec: - {{- if .Values.enableInitContainer }} - initContainers: - {{- include "bd.postgresql.up.check.initcontainer" . | indent 6 }} - {{- end }} - containers: - - env: - envFrom: - - configMapRef: - name: {{ .Release.Name }}-blackduck-config - {{- if .Values.webui.registry }} - {{- if .Values.webui.imageTag }} - image: {{ .Values.webui.registry }}/blackduck-webui:{{ .Values.webui.imageTag }} - {{- else }} - image: {{ .Values.webui.registry }}/blackduck-webui:{{ .Values.imageTag }} - {{- end}} - {{- else }} - {{- if .Values.webui.imageTag }} - image: {{ .Values.registry }}/blackduck-webui:{{ .Values.webui.imageTag }} - {{- else }} - image: {{ .Values.registry }}/blackduck-webui:{{ .Values.imageTag }} - {{- end}} - {{- end}} - {{- include "customImagePullPolicy" .Values.webui | nindent 8 }} - {{- include "webuiLivenessProbe" . | nindent 8 }} - name: webui - ports: - - containerPort: 8443 - protocol: TCP - resources: - {{- toYaml .Values.webui.resources | nindent 12 }} - {{- with .Values.webui.securityContext }} - securityContext: -{{ toYaml . | indent 10 }} - {{- end }} - volumeMounts: - - mountPath: /opt/blackduck/hub/hub-ui/security - name: dir-webui - {{- include "common.volume.mount" . | indent 8 }} - dnsPolicy: ClusterFirst - {{- include "bd.imagePullSecrets" . | nindent 6 }} - {{- with .Values.webui.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.webui.affinity }} - affinity: -{{ toYaml . | indent 8 }} - {{- end }} - {{- with .Values.webui.tolerations }} - tolerations: -{{ toYaml . | indent 6 }} - {{- end }} - {{- if .Values.webui.podSecurityContext }} - {{- with .Values.webui.podSecurityContext }} - securityContext: -{{ toYaml . | indent 8 }} - {{- end }} - {{- else }} - {{- include "bd.podSecurityContext" . | nindent 6 }} - {{- end }} - serviceAccountName: {{ .Release.Name }}-blackduck-service-account - restartPolicy: Always - volumes: - - emptyDir: {} - name: dir-webui - {{- include "common.volumes" . 
| indent 6 }} - -{{- define "webuiLivenessProbe" -}} -{{- if .Values.enableLivenessProbe -}} -livenessProbe: - exec: - command: - - /usr/local/bin/docker-healthcheck.sh - - https://127.0.0.1:8443/health-checks/liveness - - /opt/blackduck/hub/hub-ui/security/root.crt - - /opt/blackduck/hub/hub-ui/security/hub-ui-server.crt - - /opt/blackduck/hub/hub-ui/security/hub-ui-server.key - failureThreshold: 10 - initialDelaySeconds: 240 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 10 -{{- end -}} -{{- end -}} diff --git a/kubernetes/blackduck/values.ubi.yaml b/kubernetes/blackduck/values.ubi.yaml index d528ad1..39255b2 100644 --- a/kubernetes/blackduck/values.ubi.yaml +++ b/kubernetes/blackduck/values.ubi.yaml @@ -8,7 +8,7 @@ fullnameOverride: isKubernetes: true isAzure: false -imageTag: "2024.1.1" +imageTag: "2024.4.0" # Docker registry to pull Black Duck images registry: docker.io/blackducksoftware @@ -171,7 +171,7 @@ postgres: podSecurityContext: {} securityContext: {} resources: {} - imageTag: 14-1.21_ubi9.3 + imageTag: 14-1.22_ubi9.3 ### Postgres main data volume # pvc related parameters for postgres container. set if you want to create your own PVC persistentVolumeClaimName: @@ -207,7 +207,7 @@ postgresWaiter: registry: securityContext: {} resources: {} - imageTag: 1.0.11 + imageTag: 1.0.12 imagePullPolicy: IfNotPresent postgresInit: @@ -226,7 +226,7 @@ authentication: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 # pvc related parameters for authentication container. set if you want to create your own PVC (used for migration) persistentVolumeClaimName: # PVC claim size @@ -252,7 +252,7 @@ binaryscanner: # override the docker registry at container level registry: "docker.io/sigsynopsys" # override the global imageTag - imageTag: 2023.12.3 + imageTag: 2024.3.0 nodeSelector: {} tolerations: [] affinity: {} @@ -273,7 +273,7 @@ bomengine: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 dbPoolMaxActive: affinity: {} nodeSelector: {} @@ -289,7 +289,7 @@ cfssl: # override the docker registry at container level registry: # override the global imageTag - imageTag: 1.0.25_ubi9.3 + imageTag: 1.0.26_ubi9.3 # pvc related parameters for cfssl container. 
set if you want to create your own PVC (used for migration) persistentVolumeClaimName: # PVC claim size @@ -313,7 +313,7 @@ documentation: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 nodeSelector: {} tolerations: [] affinity: {} @@ -329,7 +329,7 @@ jobrunner: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 dbPoolMaxActive: nodeSelector: {} tolerations: [] @@ -352,7 +352,7 @@ rabbitmq: # override the docker registry at container level registry: # override the global imageTag - imageTag: 1.2.36_ubi9.3 + imageTag: 1.2.37_ubi9.3 # pvc related parameters for rabbitmq container persistentVolumeClaimName: # PVC claim size @@ -372,7 +372,7 @@ redis: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 tlsEnabled: false maxTotal: 128 maxIdle: 128 @@ -392,7 +392,7 @@ registration: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 # pvc related parameters for registration container. set if you want to create your own PVC (used for migration) persistentVolumeClaimName: # PVC claim size @@ -416,11 +416,44 @@ registration: podAnnotations: {} imagePullPolicy: IfNotPresent +rlservice: + enabled: false + # override the docker registry at container level + registry: + # no. of service replicas + replicas: 1 + # override the global imageTag + imageTag: 2024.4.0 + # pvc related parameters for rlservice container. set if you want to create your own PVC (used for migration) + persistentVolumeClaimName: + # PVC claim size + claimSize: "2Gi" + # PVC storage class name + storageClass: + # existing persistent volume name backing the PVC + volumeName: + dbPoolMaxActive: + nodeSelector: {} + tolerations: [] + affinity: {} + podSecurityContext: {} + securityContext: {} + hubMaxMemory: "5530m" + resources: + requests: + cpu: "2000m" + memory: "6144Mi" + limits: + cpu: "2000m" + memory: "6144Mi" + podAnnotations: {} + imagePullPolicy: IfNotPresent + scan: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 dbPoolMaxActive: nodeSelector: {} tolerations: [] @@ -438,7 +471,7 @@ storage: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 # PVC related parameters for storage cache container. # If you want to use already existing persistance volume for blackduck tools & reports storage, use this option to configure the existing pvc name. persistentVolumeClaimName: @@ -465,7 +498,7 @@ webapp: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 # pvc related parameters for webapp container. set if you want to create your own PVC (used for migration) persistentVolumeClaimName: # PVC claim size @@ -489,7 +522,7 @@ logstash: # override the docker registry at container level registry: # override the global imageTag - imageTag: 1.0.35_ubi9.3 + imageTag: 1.0.36_ubi9.3 # pvc related parameters for logstash container. 
set if you want to create your own PVC (used for migration) persistentVolumeClaimName: # PVC claim size @@ -514,7 +547,7 @@ webserver: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2.0.66_ubi9.3 + imageTag: 2024.4.0_ubi9.3 nodeSelector: {} tolerations: [] affinity: {} @@ -524,36 +557,18 @@ webserver: podAnnotations: {} imagePullPolicy: IfNotPresent -webui: - # override the docker registry at container level - registry: - imageTag: 2024.1.1_ubi9.3 - nodeSelector: {} - tolerations: [] - affinity: {} - podSecurityContext: {} - securityContext: {} - resources: - requests: - cpu: "500m" - limits: - cpu: "1000m" - memory: "640Mi" - podAnnotations: {} - imagePullPolicy: IfNotPresent - datadog: enabled: false # override the docker registry at container level registry: - imageTag: "1.0.12" + imageTag: "1.0.14" imagePullPolicy: IfNotPresent matchengine: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 dbPoolMaxActive: nodeSelector: {} tolerations: [] @@ -572,7 +587,7 @@ integration: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1_ubi9.3 + imageTag: 2024.4.0_ubi9.3 dbPoolMaxActive: affinity: {} nodeSelector: {} @@ -580,7 +595,7 @@ integration: securityContext: {} tolerations: [] replicas: 1 - hubMaxMemory: "4608m" + maxRamPercentage: 90 resources: limits: cpu: "1000m" diff --git a/kubernetes/blackduck/values.yaml b/kubernetes/blackduck/values.yaml index bfd5530..1931623 100644 --- a/kubernetes/blackduck/values.yaml +++ b/kubernetes/blackduck/values.yaml @@ -8,7 +8,7 @@ fullnameOverride: isKubernetes: true isAzure: false -imageTag: "2024.1.1" +imageTag: "2024.4.0" # Docker registry to pull Black Duck images registry: docker.io/blackducksoftware @@ -171,7 +171,7 @@ postgres: podSecurityContext: {} securityContext: {} resources: {} - imageTag: 14-1.21 + imageTag: 14-1.22 ### Postgres main data volume # pvc related parameters for postgres container. set if you want to create your own PVC persistentVolumeClaimName: @@ -207,7 +207,7 @@ postgresWaiter: registry: securityContext: {} resources: {} - imageTag: 1.0.11 + imageTag: 1.0.12 imagePullPolicy: IfNotPresent postgresInit: @@ -226,7 +226,7 @@ authentication: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 # pvc related parameters for authentication container. set if you want to create your own PVC (used for migration) persistentVolumeClaimName: # PVC claim size @@ -252,7 +252,7 @@ binaryscanner: # override the docker registry at container level registry: "docker.io/sigsynopsys" # override the global imageTag - imageTag: 2023.12.3 + imageTag: 2024.3.0 nodeSelector: {} tolerations: [] affinity: {} @@ -273,7 +273,7 @@ bomengine: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 dbPoolMaxActive: affinity: {} nodeSelector: {} @@ -289,7 +289,7 @@ cfssl: # override the docker registry at container level registry: # override the global imageTag - imageTag: 1.0.25 + imageTag: 1.0.26 # pvc related parameters for cfssl container. 
set if you want to create your own PVC (used for migration) persistentVolumeClaimName: # PVC claim size @@ -313,7 +313,7 @@ documentation: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 nodeSelector: {} tolerations: [] affinity: {} @@ -329,7 +329,7 @@ jobrunner: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 dbPoolMaxActive: nodeSelector: {} tolerations: [] @@ -352,7 +352,7 @@ rabbitmq: # override the docker registry at container level registry: # override the global imageTag - imageTag: 1.2.36 + imageTag: 1.2.37 # pvc related parameters for rabbitmq container persistentVolumeClaimName: # PVC claim size @@ -372,7 +372,7 @@ redis: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 tlsEnabled: false maxTotal: 128 maxIdle: 128 @@ -392,7 +392,7 @@ registration: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 # pvc related parameters for registration container. set if you want to create your own PVC (used for migration) persistentVolumeClaimName: # PVC claim size @@ -416,11 +416,44 @@ registration: podAnnotations: {} imagePullPolicy: IfNotPresent +rlservice: + enabled: false + # override the docker registry at container level + registry: + # no. of service replicas + replicas: 1 + # override the global imageTag + imageTag: 2024.4.0 + # pvc related parameters for rlservice container. set if you want to create your own PVC (used for migration) + persistentVolumeClaimName: + # PVC claim size + claimSize: "2Gi" + # PVC storage class name + storageClass: + # existing persistent volume name backing the PVC + volumeName: + dbPoolMaxActive: + nodeSelector: {} + tolerations: [] + affinity: {} + podSecurityContext: {} + securityContext: {} + hubMaxMemory: "5530m" + resources: + requests: + cpu: "2000m" + memory: "6144Mi" + limits: + cpu: "2000m" + memory: "6144Mi" + podAnnotations: {} + imagePullPolicy: IfNotPresent + scan: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 dbPoolMaxActive: nodeSelector: {} tolerations: [] @@ -438,7 +471,7 @@ storage: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 # PVC related parameters for storage cache container. # If you want to use already existing persistance volume for blackduck tools & reports storage, use this option to configure the existing pvc name. persistentVolumeClaimName: @@ -465,7 +498,7 @@ webapp: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 # pvc related parameters for webapp container. set if you want to create your own PVC (used for migration) persistentVolumeClaimName: # PVC claim size @@ -489,7 +522,7 @@ logstash: # override the docker registry at container level registry: # override the global imageTag - imageTag: 1.0.35 + imageTag: 1.0.36 # pvc related parameters for logstash container. 
set if you want to create your own PVC (used for migration) persistentVolumeClaimName: # PVC claim size @@ -514,7 +547,7 @@ webserver: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2.0.66 + imageTag: 2024.4.0 nodeSelector: {} tolerations: [] affinity: {} @@ -524,36 +557,18 @@ webserver: podAnnotations: {} imagePullPolicy: IfNotPresent -webui: - # override the docker registry at container level - registry: - imageTag: 2024.1.1 - nodeSelector: {} - tolerations: [] - affinity: {} - podSecurityContext: {} - securityContext: {} - resources: - requests: - cpu: "500m" - limits: - cpu: "1000m" - memory: "640Mi" - podAnnotations: {} - imagePullPolicy: IfNotPresent - datadog: enabled: false # override the docker registry at container level registry: - imageTag: "1.0.12" + imageTag: "1.0.14" imagePullPolicy: IfNotPresent matchengine: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 dbPoolMaxActive: nodeSelector: {} tolerations: [] @@ -572,7 +587,7 @@ integration: # override the docker registry at container level registry: # override the global imageTag - imageTag: 2024.1.1 + imageTag: 2024.4.0 dbPoolMaxActive: affinity: {} nodeSelector: {} @@ -580,7 +595,7 @@ integration: securityContext: {} tolerations: [] replicas: 1 - hubMaxMemory: "4608m" + maxRamPercentage: 90 resources: limits: cpu: "1000m"
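Taken together, the values changes above replace the per-service `hubMaxMemory` settings with `maxRamPercentage`, which the new `computeHubMaxMemory` helper combines with each container's `resources.limits.memory` to render `HUB_MAX_MEMORY`, and they introduce an opt-in `rlservice` block (disabled by default). The sketch below is a hedged illustration of how these new keys could be combined in an override file; the file name `my-overrides.yaml` and the specific numbers are assumptions for the example, not values taken from the chart.

```yaml
# my-overrides.yaml -- illustrative sketch only; key names follow the
# values.yaml changes in this diff, numbers are examples.
rlservice:
  enabled: true              # opt in to the ReversingLabs service (default: false)

jobrunner:
  maxRamPercentage: 80       # computeHubMaxMemory derives HUB_MAX_MEMORY from
                             # this percentage of resources.limits.memory
  resources:
    limits:
      cpu: "1000m"
      memory: "8192Mi"       # 80% of 8192Mi renders HUB_MAX_MEMORY as ~6554m
```

Such a file could then be applied with the upgrade command shown earlier in this README, e.g. `helm upgrade ${BD_NAME} synopsys/blackduck -f my-overrides.yaml --reuse-values --namespace ${BD_NAME}`.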