From e0582efeb06505f9c224beda593c48f88dcd0b08 Mon Sep 17 00:00:00 2001 From: Benjamin Bordes Date: Mon, 11 Nov 2024 16:47:55 +0100 Subject: [PATCH] feat: add support for trivy dbRepository and javaDBRepository Signed-off-by: Benjamin Bordes --- README.md | 682 +++++++++++++++++---------------- templates/trivy/trivy-sts.yaml | 4 + values.yaml | 5 +- 3 files changed, 351 insertions(+), 340 deletions(-) diff --git a/README.md b/README.md index ec394b84a..9d3c45404 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,7 @@ If Harbor is deployed behind the proxy, set it as the URL of proxy. ### Install the chart Install the Harbor helm chart with a release name `my-release`: + ```bash helm install my-release harbor/harbor ``` @@ -67,6 +68,7 @@ helm install my-release harbor/harbor ## Uninstallation To uninstall/delete the `my-release` deployment: + ```bash helm uninstall my-release ``` @@ -75,345 +77,347 @@ helm uninstall my-release The following table lists the configurable parameters of the Harbor chart and the default values. -| Parameter | Description | Default | -|-----------------------------------------------------------------------| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------- | -| **Expose** | | | -| `expose.type` | How to expose the service: `ingress`, `clusterIP`, `nodePort` or `loadBalancer`, other values will be ignored and the creation of service will be skipped. | `ingress` | -| `expose.tls.enabled` | Enable TLS or not. Delete the `ssl-redirect` annotations in `expose.ingress.annotations` when TLS is disabled and `expose.type` is `ingress`. Note: if the `expose.type` is `ingress` and TLS is disabled, the port must be included in the command when pulling/pushing images. Refer to https://github.com/goharbor/harbor/issues/5291 for details. | `true` | -| `expose.tls.certSource` | The source of the TLS certificate. Set as `auto`, `secret` or `none` and fill the information in the corresponding section: 1) auto: generate the TLS certificate automatically 2) secret: read the TLS certificate from the specified secret. The TLS certificate can be generated manually or by cert manager 3) none: configure no TLS certificate for the ingress. If the default TLS certificate is configured in the ingress controller, choose this option | `auto` | -| `expose.tls.auto.commonName` | The common name used to generate the certificate, it's necessary when the type isn't `ingress` | | -| `expose.tls.secret.secretName` | The name of secret which contains keys named: `tls.crt` - the certificate; `tls.key` - the private key | | -| `expose.ingress.hosts.core` | The host of Harbor core service in ingress rule | `core.harbor.domain` | -| `expose.ingress.controller` | The ingress controller type. 
Currently supports `default`, `gce`, `alb`, `f5-bigip` and `ncp` | `default` | -| `expose.ingress.kubeVersionOverride` | Allows the ability to override the kubernetes version used while templating the ingress | | -| `expose.ingress.annotations` | The annotations used commonly for ingresses | | -| `expose.ingress.labels` | The labels specific to ingress | {} | -| `expose.clusterIP.name` | The name of ClusterIP service | `harbor` | -| `expose.clusterIP.annotations` | The annotations attached to the ClusterIP service | {} | -| `expose.clusterIP.ports.httpPort` | The service port Harbor listens on when serving HTTP | `80` | -| `expose.clusterIP.ports.httpsPort` | The service port Harbor listens on when serving HTTPS | `443` | -| `expose.clusterIP.annotations` | The annotations used commonly for clusterIP | | -| `expose.clusterIP.labels` | The labels specific to clusterIP | {} | -| `expose.nodePort.name` | The name of NodePort service | `harbor` | -| `expose.nodePort.ports.http.port` | The service port Harbor listens on when serving HTTP | `80` | -| `expose.nodePort.ports.http.nodePort` | The node port Harbor listens on when serving HTTP | `30002` | -| `expose.nodePort.ports.https.port` | The service port Harbor listens on when serving HTTPS | `443` | -| `expose.nodePort.ports.https.nodePort` | The node port Harbor listens on when serving HTTPS | `30003` | -| `expose.nodePort.annotations` | The annotations used commonly for nodePort | | -| `expose.nodePort.labels` | The labels specific to nodePort | {} | -| `expose.loadBalancer.name` | The name of service | `harbor` | -| `expose.loadBalancer.IP` | The IP of the loadBalancer. It only works when loadBalancer supports assigning IP | `""` | -| `expose.loadBalancer.ports.httpPort` | The service port Harbor listens on when serving HTTP | `80` | -| `expose.loadBalancer.ports.httpsPort` | The service port Harbor listens on when serving HTTPS | `30002` | -| `expose.loadBalancer.annotations` | The annotations attached to the loadBalancer service | {} | -| `expose.loadBalancer.labels` | The labels specific to loadBalancer | {} | -| `expose.loadBalancer.sourceRanges` | List of IP address ranges to assign to loadBalancerSourceRanges | [] | -| **Internal TLS** | | | -| `internalTLS.enabled` | Enable TLS for the components (core, jobservice, portal, registry, trivy) | `false` | -| `internalTLS.strong_ssl_ciphers` | Enable strong ssl ciphers for nginx and portal | `false` -| `internalTLS.certSource` | Method to provide TLS for the components, options are `auto`, `manual`, `secret`. | `auto` | -| `internalTLS.trustCa` | The content of trust CA, only available when `certSource` is `manual`. **Note**: all the internal certificates of the components must be issued by this CA | | -| `internalTLS.core.secretName` | The secret name for core component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | -| `internalTLS.core.crt` | Content of core's TLS cert file, only available when `certSource` is `manual` | | -| `internalTLS.core.key` | Content of core's TLS key file, only available when `certSource` is `manual` | | -| `internalTLS.jobservice.secretName` | The secret name for jobservice component, only available when `certSource` is `secret`. 
The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | -| `internalTLS.jobservice.crt` | Content of jobservice's TLS cert file, only available when `certSource` is `manual` | | -| `internalTLS.jobservice.key` | Content of jobservice's TLS key file, only available when `certSource` is `manual` | | -| `internalTLS.registry.secretName` | The secret name for registry component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | -| `internalTLS.registry.crt` | Content of registry's TLS cert file, only available when `certSource` is `manual` | | -| `internalTLS.registry.key` | Content of registry's TLS key file, only available when `certSource` is `manual` | | -| `internalTLS.portal.secretName` | The secret name for portal component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | -| `internalTLS.portal.crt` | Content of portal's TLS cert file, only available when `certSource` is `manual` | | -| `internalTLS.portal.key` | Content of portal's TLS key file, only available when `certSource` is `manual` | | -| `internalTLS.trivy.secretName` | The secret name for trivy component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | -| `internalTLS.trivy.crt` | Content of trivy's TLS cert file, only available when `certSource` is `manual` | | -| `internalTLS.trivy.key` | Content of trivy's TLS key file, only available when `certSource` is `manual` | | -| **IPFamily** | | | -| `ipFamily.ipv4.enabled` | if cluster is ipv4 enabled, all ipv4 related configs will set correspondingly, but currently it only affects the nginx related components | `true` | -| `ipFamily.ipv6.enabled` | if cluster is ipv6 enabled, all ipv6 related configs will set correspondingly, but currently it only affects the nginx related components | `true` | -| **Persistence** | | | -| `persistence.enabled` | Enable the data persistence or not | `true` | -| `persistence.resourcePolicy` | Setting it to `keep` to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted. Does not affect PVCs created for internal database and redis components. | `keep` | -| `persistence.persistentVolumeClaim.registry.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components | | -| `persistence.persistentVolumeClaim.registry.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). 
Set it to `-` to disable dynamic provisioning | | -| `persistence.persistentVolumeClaim.registry.subPath` | The sub path used in the volume | | -| `persistence.persistentVolumeClaim.registry.accessMode` | The access mode of the volume | `ReadWriteOnce` | -| `persistence.persistentVolumeClaim.registry.size` | The size of the volume | `5Gi` | -| `persistence.persistentVolumeClaim.registry.annotations` | The annotations of the volume | | -| `persistence.persistentVolumeClaim.jobservice.jobLog.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. | | -| `persistence.persistentVolumeClaim.jobservice.jobLog.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning | | -| `persistence.persistentVolumeClaim.jobservice.jobLog.subPath` | The sub path used in the volume | | -| `persistence.persistentVolumeClaim.jobservice.jobLog.accessMode` | The access mode of the volume | `ReadWriteOnce` | -| `persistence.persistentVolumeClaim.jobservice.jobLog.size` | The size of the volume | `1Gi` | -| `persistence.persistentVolumeClaim.jobservice.jobLog.annotations` | The annotations of the volume | | -| `persistence.persistentVolumeClaim.database.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. If external database is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.database.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning. If external database is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.database.subPath` | The sub path used in the volume. If external database is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.database.accessMode` | The access mode of the volume. If external database is used, the setting will be ignored | `ReadWriteOnce` | -| `persistence.persistentVolumeClaim.database.size` | The size of the volume. If external database is used, the setting will be ignored | `1Gi` | -| `persistence.persistentVolumeClaim.database.annotations` | The annotations of the volume | | -| `persistence.persistentVolumeClaim.redis.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. If external Redis is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.redis.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning. If external Redis is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.redis.subPath` | The sub path used in the volume. If external Redis is used, the setting will be ignored | | -| `persistence.persistentVolumeClaim.redis.accessMode` | The access mode of the volume. If external Redis is used, the setting will be ignored | `ReadWriteOnce` | -| `persistence.persistentVolumeClaim.redis.size` | The size of the volume. 
If external Redis is used, the setting will be ignored | `1Gi` | -| `persistence.persistentVolumeClaim.redis.annotations` | The annotations of the volume | | -| `persistence.persistentVolumeClaim.trivy.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components | | -| `persistence.persistentVolumeClaim.trivy.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning | | -| `persistence.persistentVolumeClaim.trivy.subPath` | The sub path used in the volume | | -| `persistence.persistentVolumeClaim.trivy.accessMode` | The access mode of the volume | `ReadWriteOnce` | -| `persistence.persistentVolumeClaim.trivy.size` | The size of the volume | `1Gi` | -| `persistence.persistentVolumeClaim.trivy.annotations` | The annotations of the volume | | -| `persistence.imageChartStorage.disableredirect` | The configuration for managing redirects from content backends. For backends which not supported it (such as using minio for `s3` storage type), please set it to `true` to disable redirects. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect) for more details | `false` | -| `persistence.imageChartStorage.caBundleSecretName` | Specify the `caBundleSecretName` if the storage service uses a self-signed certificate. The secret must contain keys named `ca.crt` which will be injected into the trust store of registry's and containers. | | -| `persistence.imageChartStorage.type` | The type of storage for images and charts: `filesystem`, `azure`, `gcs`, `s3`, `swift` or `oss`. The type must be `filesystem` if you want to use persistent volumes for registry. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) for more details | `filesystem` | -| `persistence.imageChartStorage.gcs.existingSecret` | An existing secret containing the gcs service account json key. The key must be gcs-key.json. | `""` | -| `persistence.imageChartStorage.gcs.useWorkloadIdentity` | A boolean to allow the use of workloadidentity in a GKE cluster. To use it, create a kubernetes service account and set the name in the key `serviceAccountName` of each component, then allow automounting the service account. | `false` | -| **General** | | | -| `externalURL` | The external URL for Harbor core service | `https://core.harbor.domain` | -| `caBundleSecretName` | The custom CA bundle secret name, the secret must contain key named "ca.crt" which will be injected into the trust store for core, jobservice, registry, trivy components. | | -| `uaaSecretName` | If using external UAA auth which has a self signed cert, you can provide a pre-created secret containing it under the key `ca.crt`. | | -| `imagePullPolicy` | The image pull policy | | -| `imagePullSecrets` | The imagePullSecrets names for all deployments | | -| `updateStrategy.type` | The update strategy for deployments with persistent volumes(jobservice, registry): `RollingUpdate` or `Recreate`. Set it as `Recreate` when `RWM` for volumes isn't supported | `RollingUpdate` | -| `logLevel` | The log level: `debug`, `info`, `warning`, `error` or `fatal` | `info` | -| `harborAdminPassword` | The initial password of Harbor admin. Change it from portal after launching Harbor | `Harbor12345` | -| `existingSecretAdminPassword` | The name of secret where admin password can be found. 
| | -| `existingSecretAdminPasswordKey` | The name of the key in the secret where to find harbor admin password Harbor | `HARBOR_ADMIN_PASSWORD` | -| `caSecretName` | The name of the secret which contains key named `ca.crt`. Setting this enables the download link on portal to download the CA certificate when the certificate isn't generated automatically | | -| `secretKey` | The key used for encryption. Must be a string of 16 chars | `not-a-secure-key` | -| `existingSecretSecretKey` | An existing secret containing the encoding secretKey | `""` | -| `proxy.httpProxy` | The URL of the HTTP proxy server | | -| `proxy.httpsProxy` | The URL of the HTTPS proxy server | | -| `proxy.noProxy` | The URLs that the proxy settings not apply to | 127.0.0.1,localhost,.local,.internal | -| `proxy.components` | The component list that the proxy settings apply to | core, jobservice, trivy | -| `enableMigrateHelmHook` | Run the migration job via helm hook, if it is true, the database migration will be separated from harbor-core, run with a preupgrade job migration-job | `false` | -| **Nginx** (if service exposed via `ingress`, Nginx will not be used) | | | -| `nginx.image.repository` | Image repository | `goharbor/nginx-photon` | -| `nginx.image.tag` | Image tag | `dev` | -| `nginx.replicas` | The replica count | `1` | -| `nginx.revisionHistoryLimit` | The revision history limit | `10` | -| `nginx.resources` | The [resources] to allocate for container | undefined | -| `nginx.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `nginx.nodeSelector` | Node labels for pod assignment | `{}` | -| `nginx.tolerations` | Tolerations for pod assignment | `[]` | -| `nginx.affinity` | Node/Pod affinities | `{}` | -| `nginx.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | -| `nginx.podAnnotations` | Annotations to add to the nginx pod | `{}` | -| `nginx.priorityClassName` | The priority class to run the pod as | | -| **Portal** | | | -| `portal.image.repository` | Repository for portal image | `goharbor/harbor-portal` | -| `portal.image.tag` | Tag for portal image | `dev` | -| `portal.replicas` | The replica count | `1` | -| `portal.revisionHistoryLimit` | The revision history limit | `10` | -| `portal.resources` | The [resources] to allocate for container | undefined | -| `portal.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `portal.nodeSelector` | Node labels for pod assignment | `{}` | -| `portal.tolerations` | Tolerations for pod assignment | `[]` | -| `portal.affinity` | Node/Pod affinities | `{}` | -| `portal.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | -| `portal.podAnnotations` | Annotations to add to the portal pod | `{}` | -| `portal.serviceAnnotations` | Annotations to add to the portal service | `{}` | -| `portal.priorityClassName` | The priority class to run the pod as | | -| `portal.initContainers` | Init containers to be run before the controller's container starts. 
| `[]` | -| **Core** | | | -| `core.image.repository` | Repository for Harbor core image | `goharbor/harbor-core` | -| `core.image.tag` | Tag for Harbor core image | `dev` | -| `core.replicas` | The replica count | `1` | -| `core.revisionHistoryLimit` | The revision history limit | `10` | -| `core.startupProbe.initialDelaySeconds` | The initial delay in seconds for the startup probe | `10` | -| `core.resources` | The [resources] to allocate for container | undefined | -| `core.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `core.nodeSelector` | Node labels for pod assignment | `{}` | -| `core.tolerations` | Tolerations for pod assignment | `[]` | -| `core.affinity` | Node/Pod affinities | `{}` | -| `core.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | -| `core.podAnnotations` | Annotations to add to the core pod | `{}` | -| `core.serviceAnnotations` | Annotations to add to the core service | `{}` | -| `core.configureUserSettings` | A JSON string to set in the environment variable `CONFIG_OVERWRITE_JSON` to configure user settings. See the [official docs](https://goharbor.io/docs/latest/install-config/configure-user-settings-cli/#configure-users-settings-using-an-environment-variable). | | -| `core.quotaUpdateProvider` | The provider for updating project quota(usage), there are 2 options, redis or db. By default it is implemented by db but you can configure it to redis which can improve the performance of high concurrent pushing to the same project, and reduce the database connections spike and occupies. Using redis will bring up some delay for quota usage updation for display, so only suggest switch provider to redis if you were ran into the db connections spike around the scenario of high concurrent pushing to same project, no improvment for other scenes. | `db` | -| `core.secret` | Secret is used when core server communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | -| `core.secretName` | Fill the name of a kubernetes secret if you want to use your own TLS certificate and private key for token encryption/decryption. The secret must contain keys named: `tls.crt` - the certificate and `tls.key` - the private key. The default key pair will be used if it isn't set | | -| `core.tokenKey` | PEM-formatted RSA private key used to sign service tokens. Only used if `core.secretName` is unset. If set, `core.tokenCert` MUST also be set. | | -| `core.tokenCert` | PEM-formatted certificate signed by `core.tokenKey` used to validate service tokens. Only used if `core.secretName` is unset. If set, `core.tokenKey` MUST also be set. | | -| `core.xsrfKey` | The XSRF key. Will be generated automatically if it isn't specified | | -| `core.priorityClassName` | The priority class to run the pod as | | -| `core.artifactPullAsyncFlushDuration` | The time duration for async update artifact pull_time and repository pull_count | | -| `core.gdpr.deleteUser` | Enable GDPR compliant user delete | `false` | -| `core.gdpr.auditLogsCompliant` | Enable GDPR compliant for audit logs by changing username to its CRC32 value if that user was deleted from the system | `false` | -| `core.initContainers` | Init containers to be run before the controller's container starts. 
| `[]` | -| **Jobservice** | | | -| `jobservice.image.repository` | Repository for jobservice image | `goharbor/harbor-jobservice` | -| `jobservice.image.tag` | Tag for jobservice image | `dev` | -| `jobservice.replicas` | The replica count | `1` | -| `jobservice.revisionHistoryLimit` | The revision history limit | `10` | -| `jobservice.maxJobWorkers` | The max job workers | `10` | -| `jobservice.jobLoggers` | The loggers for jobs: `file`, `database` or `stdout` | `[file]` | -| `jobservice.loggerSweeperDuration` | The jobLogger sweeper duration in days (ignored if `jobLoggers` is set to `stdout`) | `14` | -| `jobservice.notification.webhook_job_max_retry` | The maximum retry of webhook sending notifications | `3` | -| `jobservice.notification.webhook_job_http_client_timeout` | The http client timeout value of webhook sending notifications | `3` | -| `jobservice.reaper.max_update_hours` | the max time to wait for a task to finish, if unfinished after max_update_hours, the task will be mark as error, but the task will continue to run, default value is 24 | `24` | -| `jobservice.reaper.max_dangling_hours` | the max time for execution in running state without new task created | `168` | -| `jobservice.resources` | The [resources] to allocate for container | undefined | -| `jobservice.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `jobservice.nodeSelector` | Node labels for pod assignment | `{}` | -| `jobservice.tolerations` | Tolerations for pod assignment | `[]` | -| `jobservice.affinity` | Node/Pod affinities | `{}` | -| `jobservice.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | -| `jobservice.podAnnotations` | Annotations to add to the jobservice pod | `{}` | -| `jobservice.priorityClassName` | The priority class to run the pod as | | -| `jobservice.secret` | Secret is used when job service communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | -| `jobservice.initContainers` | Init containers to be run before the controller's container starts. | `[]` | -| **Registry** | | | -| `registry.registry.image.repository` | Repository for registry image | `goharbor/registry-photon` | -| `registry.registry.image.tag` | Tag for registry image | `dev` | -| `registry.registry.resources` | The [resources] to allocate for container | undefined | -| `registry.controller.image.repository` | Repository for registry controller image | `goharbor/harbor-registryctl` | -| `registry.controller.image.tag` | Tag for registry controller image | `dev` | -| `registry.controller.resources` | The [resources] to allocate for container | undefined | -| `registry.replicas` | The replica count | `1` | -| `registry.revisionHistoryLimit` | The revision history limit | `10` | -| `registry.nodeSelector` | Node labels for pod assignment | `{}` | -| `registry.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `registry.tolerations` | Tolerations for pod assignment | `[]` | -| `registry.affinity` | Node/Pod affinities | `{}` | -| `registry.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | -| `registry.middleware` | Middleware is used to add support for a CDN between backend storage and `docker pull` recipient. See [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#middleware). 
| | -| `registry.podAnnotations` | Annotations to add to the registry pod | `{}` | -| `registry.priorityClassName` | The priority class to run the pod as | | -| `registry.secret` | Secret is used to secure the upload state from client and registry storage backend. See [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#http). If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | -| `registry.credentials.username` | The username that harbor core uses internally to access the registry instance. Together with the `registry.credentials.password`, a htpasswd is created. This is an alternative to providing `registry.credentials.htpasswdString`. For more details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). | `harbor_registry_user` | -| `registry.credentials.password` | The password that harbor core uses internally to access the registry instance. Together with the `registry.credentials.username`, a htpasswd is created. This is an alternative to providing `registry.credentials.htpasswdString`. For more details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). It is suggested you update this value before installation. | `harbor_registry_password` | -| `registry.credentials.existingSecret` | An existing secret containing the password for accessing the registry instance, which is hosted by htpasswd auth mode. More details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). The key must be `REGISTRY_PASSWD` | `""` | -| `registry.credentials.htpasswdString` | Login and password in htpasswd string format. Excludes `registry.credentials.username` and `registry.credentials.password`. May come in handy when integrating with tools like argocd or flux. This allows the same line to be generated each time the template is rendered, instead of the `htpasswd` function from helm, which generates different lines each time because of the salt. | undefined | -| `registry.relativeurls` | If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. Needed if harbor is behind a reverse proxy | `false` | -| `registry.upload_purging.enabled` | If true, enable purge _upload directories | `true` | -| `registry.upload_purging.age` | Remove files in _upload directories which exist for a period of time, default is one week. | `168h` | -| `registry.upload_purging.interval` | The interval of the purge operations | `24h` | -| `registry.upload_purging.dryrun` | If true, enable dryrun for purging _upload, default false | `false` | -| `registry.initContainers` | Init containers to be run before the controller's container starts. | `[]` | -| **[Trivy][trivy]** | | | -| `trivy.enabled` | The flag to enable Trivy scanner | `true` | -| `trivy.image.repository` | Repository for Trivy adapter image | `goharbor/trivy-adapter-photon` | -| `trivy.image.tag` | Tag for Trivy adapter image | `dev` | -| `trivy.resources` | The [resources] to allocate for Trivy adapter container | | -| `trivy.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `trivy.replicas` | The number of Pod replicas | `1` | -| `trivy.debugMode` | The flag to enable Trivy debug mode | `false` | -| `trivy.vulnType` | Comma-separated list of vulnerability types. Possible values `os` and `library`. 
| `os,library` | -| `trivy.severity` | Comma-separated list of severities to be checked | `UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL` | -| `trivy.ignoreUnfixed` | The flag to display only fixed vulnerabilities | `false` | -| `trivy.insecure` | The flag to skip verifying registry certificate | `false` | -| `trivy.skipUpdate` | The flag to disable [Trivy DB][trivy-db] downloads from GitHub | `false` | -| `trivy.skipJavaDBUpdate` | If the flag is enabled you have to manually download the `trivy-java.db` file [Trivy Java DB][trivy-java-db] and mount it in the `/home/scanner/.cache/trivy/java-db/trivy-java.db` path | `false` | -| `trivy.offlineScan` | The flag prevents Trivy from sending API requests to identify dependencies. | `false` | -| `trivy.securityCheck` | Comma-separated list of what security issues to detect. | `vuln` | -| `trivy.timeout` | The duration to wait for scan completion | `5m0s` | -| `trivy.gitHubToken` | The GitHub access token to download [Trivy DB][trivy-db] (see [GitHub rate limiting][trivy-rate-limiting]) | | -| `trivy.priorityClassName` | The priority class to run the pod as | | -| `trivy.topologySpreadConstraints` | The priority class to run the pod as | | -| `trivy.initContainers` | Init containers to be run before the controller's container starts. | `[]` | -| **Database** | | | -| `database.type` | If external database is used, set it to `external` | `internal` | -| `database.internal.image.repository` | Repository for database image | `goharbor/harbor-db` | -| `database.internal.image.tag` | Tag for database image | `dev` | -| `database.internal.password` | The password for database | `changeit` | -| `database.internal.shmSizeLimit` | The limit for the size of shared memory for internal PostgreSQL, conventionally it's around 50% of the memory limit of the container | `512Mi` | -| `database.internal.resources` | The [resources] to allocate for container | undefined | -| `database.internal.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `database.internal.initContainer.migrator.resources` | The [resources] to allocate for the database migrator initContainer | undefined | -| `database.internal.initContainer.permissions.resources` | The [resources] to allocate for the database permissions initContainer | undefined | -| `database.internal.nodeSelector` | Node labels for pod assignment | `{}` | -| `database.internal.tolerations` | Tolerations for pod assignment | `[]` | -| `database.internal.affinity` | Node/Pod affinities | `{}` | -| `database.internal.priorityClassName` | The priority class to run the pod as | | -| `database.internal.livenessProbe.timeoutSeconds` | The timeout used in liveness probe; 1 to 5 seconds | 1 | -| `database.internal.readinessProbe.timeoutSeconds` | The timeout used in readiness probe; 1 to 5 seconds | 1 | -| `database.internal.extrInitContainers` | Extra init containers to be run before the database's container starts. | `[]` | -| `database.external.host` | The hostname of external database | `192.168.0.1` | -| `database.external.port` | The port of external database | `5432` | -| `database.external.username` | The username of external database | `user` | -| `database.external.password` | The password of external database | `password` | -| `database.external.coreDatabase` | The database used by core service | `registry` | -| `database.external.existingSecret` | An existing password containing the database password. the key must be `password`. 
| `""` | -| `database.external.sslmode` | Connection method of external database (require, verify-full, verify-ca, disable) | `disable` | -| `database.maxIdleConns` | The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained. | `50` | -| `database.maxOpenConns` | The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections. | `100` | -| `database.podAnnotations` | Annotations to add to the database pod | `{}` | -| **Redis** | | | -| `redis.type` | If external redis is used, set it to `external` | `internal` | -| `redis.internal.image.repository` | Repository for redis image | `goharbor/redis-photon` | -| `redis.internal.image.tag` | Tag for redis image | `dev` | -| `redis.internal.resources` | The [resources] to allocate for container | undefined | -| `redis.internal.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `redis.internal.nodeSelector` | Node labels for pod assignment | `{}` | -| `redis.internal.tolerations` | Tolerations for pod assignment | `[]` | -| `redis.internal.affinity` | Node/Pod affinities | `{}` | -| `redis.internal.priorityClassName` | The priority class to run the pod as | | -| `redis.internal.jobserviceDatabaseIndex` | The database index for jobservice | `1` | -| `redis.internal.registryDatabaseIndex` | The database index for registry | `2` | -| `redis.internal.trivyAdapterIndex` | The database index for trivy adapter | `5` | -| `redis.internal.harborDatabaseIndex` | The database index for harbor miscellaneous business logic | `0` | -| `redis.internal.cacheLayerDatabaseIndex` | The database index for harbor cache layer | `0` | -| `redis.internal.initContainers` | Init containers to be run before the redis's container starts. | `[]` | -| `redis.external.addr` | The addr of external Redis: :. When using sentinel, it should be :,:,: | `192.168.0.2:6379` | -| `redis.external.sentinelMasterSet` | The name of the set of Redis instances to monitor | | -| `redis.external.coreDatabaseIndex` | The database index for core | `0` | -| `redis.external.jobserviceDatabaseIndex` | The database index for jobservice | `1` | -| `redis.external.registryDatabaseIndex` | The database index for registry | `2` | -| `redis.external.trivyAdapterIndex` | The database index for trivy adapter | `5` | -| `redis.external.harborDatabaseIndex` | The database index for harbor miscellaneous business logic | `0` | -| `redis.external.cacheLayerDatabaseIndex` | The database index for harbor cache layer | `0` | -| `redis.external.username` | The username of external Redis | | -| `redis.external.password` | The password of external Redis | | -| `redis.external.existingSecret` | Use an existing secret to connect to redis. The key must be `REDIS_PASSWORD`. 
| `""` | -| `redis.podAnnotations` | Annotations to add to the redis pod | `{}` | -| **Exporter** | | | -| `exporter.replicas` | The replica count | `1` | -| `exporter.revisionHistoryLimit` | The revision history limit | `10` | -| `exporter.podAnnotations` | Annotations to add to the exporter pod | `{}` | -| `exporter.image.repository` | Repository for redis image | `goharbor/harbor-exporter` | -| `exporter.image.tag` | Tag for exporter image | `dev` | -| `exporter.nodeSelector` | Node labels for pod assignment | `{}` | -| `exporter.tolerations` | Tolerations for pod assignment | `[]` | -| `exporter.affinity` | Node/Pod affinities | `{}` | -| `exporter.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | -| `exporter.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | -| `exporter.cacheDuration` | the cache duration for information that exporter collected from Harbor | `30` | -| `exporter.cacheCleanInterval` | cache clean interval for information that exporter collected from Harbor | `14400` | -| `exporter.priorityClassName` | The priority class to run the pod as | | -| **Metrics** | | | -| `metrics.enabled` | if enable harbor metrics | `false` | -| `metrics.core.path` | the url path for core metrics | `/metrics` | -| `metrics.core.port` | the port for core metrics | `8001` | -| `metrics.registry.path` | the url path for registry metrics | `/metrics` | -| `metrics.registry.port` | the port for registry metrics | `8001` | -| `metrics.exporter.path` | the url path for exporter metrics | `/metrics` | -| `metrics.exporter.port` | the port for exporter metrics | `8001` | -| `metrics.serviceMonitor.enabled` | create prometheus serviceMonitor. Requires prometheus CRD's | `false` | -| `metrics.serviceMonitor.additionalLabels` | additional labels to upsert to the manifest | `""` | -| `metrics.serviceMonitor.interval` | scrape period for harbor metrics | `""` | -| `metrics.serviceMonitor.metricRelabelings` | metrics relabel to add/mod/del before ingestion | `[]` | -| `metrics.serviceMonitor.relabelings` | relabels to add/mod/del to sample before scrape | `[]` | -| **Trace** | | | -| `trace.enabled` | Enable tracing or not | `false` | -| `trace.provider` | The tracing provider: `jaeger` or `otel`. 
`jaeger` should be 1.26+ | `jaeger` | -| `trace.sample_rate` | Set `sample_rate` to 1 if you want sampling 100% of trace data; set 0.5 if you want sampling 50% of trace data, and so forth | `1` | -| `trace.namespace` | Namespace used to differentiate different harbor services | | -| `trace.attributes` | `attributes` is a key value dict contains user defined attributes used to initialize trace provider | | -| `trace.jaeger.endpoint` | The endpoint of jaeger | `http://hostname:14268/api/traces` | -| `trace.jaeger.username` | The username of jaeger | | -| `trace.jaeger.password` | The password of jaeger | | -| `trace.jaeger.agent_host` | The agent host of jaeger | | -| `trace.jaeger.agent_port` | The agent port of jaeger | `6831` | -| `trace.otel.endpoint` | The endpoint of otel | `hostname:4318` | -| `trace.otel.url_path` | The URL path of otel | `/v1/traces` | -| `trace.otel.compression` | Whether enable compression or not for otel | `false` | -| `trace.otel.insecure` | Whether establish insecure connection or not for otel | `true` | -| `trace.otel.timeout` | The timeout in seconds of otel | `10` | -| **Cache** | | | -| `cache.enabled` | Enable cache layer or not | `false` | -| `cache.expireHours` | The expire hours of cache layer | `24` | +| Parameter | Description | Default | +| -------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------ | +| **Expose** | | | +| `expose.type` | How to expose the service: `ingress`, `clusterIP`, `nodePort` or `loadBalancer`, other values will be ignored and the creation of service will be skipped. | `ingress` | +| `expose.tls.enabled` | Enable TLS or not. Delete the `ssl-redirect` annotations in `expose.ingress.annotations` when TLS is disabled and `expose.type` is `ingress`. Note: if the `expose.type` is `ingress` and TLS is disabled, the port must be included in the command when pulling/pushing images. Refer to https://github.com/goharbor/harbor/issues/5291 for details. | `true` | +| `expose.tls.certSource` | The source of the TLS certificate. Set as `auto`, `secret` or `none` and fill the information in the corresponding section: 1) auto: generate the TLS certificate automatically 2) secret: read the TLS certificate from the specified secret. The TLS certificate can be generated manually or by cert manager 3) none: configure no TLS certificate for the ingress. If the default TLS certificate is configured in the ingress controller, choose this option | `auto` | +| `expose.tls.auto.commonName` | The common name used to generate the certificate, it's necessary when the type isn't `ingress` | | +| `expose.tls.secret.secretName` | The name of secret which contains keys named: `tls.crt` - the certificate; `tls.key` - the private key | | +| `expose.ingress.hosts.core` | The host of Harbor core service in ingress rule | `core.harbor.domain` | +| `expose.ingress.controller` | The ingress controller type. 
Currently supports `default`, `gce`, `alb`, `f5-bigip` and `ncp` | `default` | +| `expose.ingress.kubeVersionOverride` | Allows the ability to override the kubernetes version used while templating the ingress | | +| `expose.ingress.annotations` | The annotations used commonly for ingresses | | +| `expose.ingress.labels` | The labels specific to ingress | {} | +| `expose.clusterIP.name` | The name of ClusterIP service | `harbor` | +| `expose.clusterIP.annotations` | The annotations attached to the ClusterIP service | {} | +| `expose.clusterIP.ports.httpPort` | The service port Harbor listens on when serving HTTP | `80` | +| `expose.clusterIP.ports.httpsPort` | The service port Harbor listens on when serving HTTPS | `443` | +| `expose.clusterIP.annotations` | The annotations used commonly for clusterIP | | +| `expose.clusterIP.labels` | The labels specific to clusterIP | {} | +| `expose.nodePort.name` | The name of NodePort service | `harbor` | +| `expose.nodePort.ports.http.port` | The service port Harbor listens on when serving HTTP | `80` | +| `expose.nodePort.ports.http.nodePort` | The node port Harbor listens on when serving HTTP | `30002` | +| `expose.nodePort.ports.https.port` | The service port Harbor listens on when serving HTTPS | `443` | +| `expose.nodePort.ports.https.nodePort` | The node port Harbor listens on when serving HTTPS | `30003` | +| `expose.nodePort.annotations` | The annotations used commonly for nodePort | | +| `expose.nodePort.labels` | The labels specific to nodePort | {} | +| `expose.loadBalancer.name` | The name of service | `harbor` | +| `expose.loadBalancer.IP` | The IP of the loadBalancer. It only works when loadBalancer supports assigning IP | `""` | +| `expose.loadBalancer.ports.httpPort` | The service port Harbor listens on when serving HTTP | `80` | +| `expose.loadBalancer.ports.httpsPort` | The service port Harbor listens on when serving HTTPS | `30002` | +| `expose.loadBalancer.annotations` | The annotations attached to the loadBalancer service | {} | +| `expose.loadBalancer.labels` | The labels specific to loadBalancer | {} | +| `expose.loadBalancer.sourceRanges` | List of IP address ranges to assign to loadBalancerSourceRanges | [] | +| **Internal TLS** | | | +| `internalTLS.enabled` | Enable TLS for the components (core, jobservice, portal, registry, trivy) | `false` | +| `internalTLS.strong_ssl_ciphers` | Enable strong ssl ciphers for nginx and portal | `false` | +| `internalTLS.certSource` | Method to provide TLS for the components, options are `auto`, `manual`, `secret`. | `auto` | +| `internalTLS.trustCa` | The content of trust CA, only available when `certSource` is `manual`. **Note**: all the internal certificates of the components must be issued by this CA | | +| `internalTLS.core.secretName` | The secret name for core component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | +| `internalTLS.core.crt` | Content of core's TLS cert file, only available when `certSource` is `manual` | | +| `internalTLS.core.key` | Content of core's TLS key file, only available when `certSource` is `manual` | | +| `internalTLS.jobservice.secretName` | The secret name for jobservice component, only available when `certSource` is `secret`. 
The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | +| `internalTLS.jobservice.crt` | Content of jobservice's TLS cert file, only available when `certSource` is `manual` | | +| `internalTLS.jobservice.key` | Content of jobservice's TLS key file, only available when `certSource` is `manual` | | +| `internalTLS.registry.secretName` | The secret name for registry component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | +| `internalTLS.registry.crt` | Content of registry's TLS cert file, only available when `certSource` is `manual` | | +| `internalTLS.registry.key` | Content of registry's TLS key file, only available when `certSource` is `manual` | | +| `internalTLS.portal.secretName` | The secret name for portal component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | +| `internalTLS.portal.crt` | Content of portal's TLS cert file, only available when `certSource` is `manual` | | +| `internalTLS.portal.key` | Content of portal's TLS key file, only available when `certSource` is `manual` | | +| `internalTLS.trivy.secretName` | The secret name for trivy component, only available when `certSource` is `secret`. The secret must contain keys named: `ca.crt` - the CA certificate which is used to issue internal key and crt pair for components and all Harbor components must be issued by the same CA, `tls.crt` - the content of the TLS cert file, `tls.key` - the content of the TLS key file. | | +| `internalTLS.trivy.crt` | Content of trivy's TLS cert file, only available when `certSource` is `manual` | | +| `internalTLS.trivy.key` | Content of trivy's TLS key file, only available when `certSource` is `manual` | | +| **IPFamily** | | | +| `ipFamily.ipv4.enabled` | if cluster is ipv4 enabled, all ipv4 related configs will set correspondingly, but currently it only affects the nginx related components | `true` | +| `ipFamily.ipv6.enabled` | if cluster is ipv6 enabled, all ipv6 related configs will set correspondingly, but currently it only affects the nginx related components | `true` | +| **Persistence** | | | +| `persistence.enabled` | Enable the data persistence or not | `true` | +| `persistence.resourcePolicy` | Setting it to `keep` to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted. Does not affect PVCs created for internal database and redis components. | `keep` | +| `persistence.persistentVolumeClaim.registry.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components | | +| `persistence.persistentVolumeClaim.registry.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). 
Set it to `-` to disable dynamic provisioning | | +| `persistence.persistentVolumeClaim.registry.subPath` | The sub path used in the volume | | +| `persistence.persistentVolumeClaim.registry.accessMode` | The access mode of the volume | `ReadWriteOnce` | +| `persistence.persistentVolumeClaim.registry.size` | The size of the volume | `5Gi` | +| `persistence.persistentVolumeClaim.registry.annotations` | The annotations of the volume | | +| `persistence.persistentVolumeClaim.jobservice.jobLog.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. | | +| `persistence.persistentVolumeClaim.jobservice.jobLog.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning | | +| `persistence.persistentVolumeClaim.jobservice.jobLog.subPath` | The sub path used in the volume | | +| `persistence.persistentVolumeClaim.jobservice.jobLog.accessMode` | The access mode of the volume | `ReadWriteOnce` | +| `persistence.persistentVolumeClaim.jobservice.jobLog.size` | The size of the volume | `1Gi` | +| `persistence.persistentVolumeClaim.jobservice.jobLog.annotations` | The annotations of the volume | | +| `persistence.persistentVolumeClaim.database.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. If external database is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.database.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning. If external database is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.database.subPath` | The sub path used in the volume. If external database is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.database.accessMode` | The access mode of the volume. If external database is used, the setting will be ignored | `ReadWriteOnce` | +| `persistence.persistentVolumeClaim.database.size` | The size of the volume. If external database is used, the setting will be ignored | `1Gi` | +| `persistence.persistentVolumeClaim.database.annotations` | The annotations of the volume | | +| `persistence.persistentVolumeClaim.redis.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components. If external Redis is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.redis.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning. If external Redis is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.redis.subPath` | The sub path used in the volume. If external Redis is used, the setting will be ignored | | +| `persistence.persistentVolumeClaim.redis.accessMode` | The access mode of the volume. If external Redis is used, the setting will be ignored | `ReadWriteOnce` | +| `persistence.persistentVolumeClaim.redis.size` | The size of the volume. 
If external Redis is used, the setting will be ignored | `1Gi` | +| `persistence.persistentVolumeClaim.redis.annotations` | The annotations of the volume | | +| `persistence.persistentVolumeClaim.trivy.existingClaim` | Use the existing PVC which must be created manually before bound, and specify the `subPath` if the PVC is shared with other components | | +| `persistence.persistentVolumeClaim.trivy.storageClass` | Specify the `storageClass` used to provision the volume. Or the default StorageClass will be used (the default). Set it to `-` to disable dynamic provisioning | | +| `persistence.persistentVolumeClaim.trivy.subPath` | The sub path used in the volume | | +| `persistence.persistentVolumeClaim.trivy.accessMode` | The access mode of the volume | `ReadWriteOnce` | +| `persistence.persistentVolumeClaim.trivy.size` | The size of the volume | `1Gi` | +| `persistence.persistentVolumeClaim.trivy.annotations` | The annotations of the volume | | +| `persistence.imageChartStorage.disableredirect` | The configuration for managing redirects from content backends. For backends which not supported it (such as using minio for `s3` storage type), please set it to `true` to disable redirects. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect) for more details | `false` | +| `persistence.imageChartStorage.caBundleSecretName` | Specify the `caBundleSecretName` if the storage service uses a self-signed certificate. The secret must contain keys named `ca.crt` which will be injected into the trust store of registry's and containers. | | +| `persistence.imageChartStorage.type` | The type of storage for images and charts: `filesystem`, `azure`, `gcs`, `s3`, `swift` or `oss`. The type must be `filesystem` if you want to use persistent volumes for registry. Refer to the [guide](https://github.com/docker/distribution/blob/master/docs/configuration.md#storage) for more details | `filesystem` | +| `persistence.imageChartStorage.gcs.existingSecret` | An existing secret containing the gcs service account json key. The key must be gcs-key.json. | `""` | +| `persistence.imageChartStorage.gcs.useWorkloadIdentity` | A boolean to allow the use of workloadidentity in a GKE cluster. To use it, create a kubernetes service account and set the name in the key `serviceAccountName` of each component, then allow automounting the service account. | `false` | +| **General** | | | +| `externalURL` | The external URL for Harbor core service | `https://core.harbor.domain` | +| `caBundleSecretName` | The custom CA bundle secret name, the secret must contain key named "ca.crt" which will be injected into the trust store for core, jobservice, registry, trivy components. | | +| `uaaSecretName` | If using external UAA auth which has a self signed cert, you can provide a pre-created secret containing it under the key `ca.crt`. | | +| `imagePullPolicy` | The image pull policy | | +| `imagePullSecrets` | The imagePullSecrets names for all deployments | | +| `updateStrategy.type` | The update strategy for deployments with persistent volumes(jobservice, registry): `RollingUpdate` or `Recreate`. Set it as `Recreate` when `RWM` for volumes isn't supported | `RollingUpdate` | +| `logLevel` | The log level: `debug`, `info`, `warning`, `error` or `fatal` | `info` | +| `harborAdminPassword` | The initial password of Harbor admin. Change it from portal after launching Harbor | `Harbor12345` | +| `existingSecretAdminPassword` | The name of secret where admin password can be found. 
| | +| `existingSecretAdminPasswordKey` | The name of the key in the secret where to find harbor admin password Harbor | `HARBOR_ADMIN_PASSWORD` | +| `caSecretName` | The name of the secret which contains key named `ca.crt`. Setting this enables the download link on portal to download the CA certificate when the certificate isn't generated automatically | | +| `secretKey` | The key used for encryption. Must be a string of 16 chars | `not-a-secure-key` | +| `existingSecretSecretKey` | An existing secret containing the encoding secretKey | `""` | +| `proxy.httpProxy` | The URL of the HTTP proxy server | | +| `proxy.httpsProxy` | The URL of the HTTPS proxy server | | +| `proxy.noProxy` | The URLs that the proxy settings not apply to | 127.0.0.1,localhost,.local,.internal | +| `proxy.components` | The component list that the proxy settings apply to | core, jobservice, trivy | +| `enableMigrateHelmHook` | Run the migration job via helm hook, if it is true, the database migration will be separated from harbor-core, run with a preupgrade job migration-job | `false` | +| **Nginx** (if service exposed via `ingress`, Nginx will not be used) | | | +| `nginx.image.repository` | Image repository | `goharbor/nginx-photon` | +| `nginx.image.tag` | Image tag | `dev` | +| `nginx.replicas` | The replica count | `1` | +| `nginx.revisionHistoryLimit` | The revision history limit | `10` | +| `nginx.resources` | The [resources] to allocate for container | undefined | +| `nginx.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `nginx.nodeSelector` | Node labels for pod assignment | `{}` | +| `nginx.tolerations` | Tolerations for pod assignment | `[]` | +| `nginx.affinity` | Node/Pod affinities | `{}` | +| `nginx.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | +| `nginx.podAnnotations` | Annotations to add to the nginx pod | `{}` | +| `nginx.priorityClassName` | The priority class to run the pod as | | +| **Portal** | | | +| `portal.image.repository` | Repository for portal image | `goharbor/harbor-portal` | +| `portal.image.tag` | Tag for portal image | `dev` | +| `portal.replicas` | The replica count | `1` | +| `portal.revisionHistoryLimit` | The revision history limit | `10` | +| `portal.resources` | The [resources] to allocate for container | undefined | +| `portal.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `portal.nodeSelector` | Node labels for pod assignment | `{}` | +| `portal.tolerations` | Tolerations for pod assignment | `[]` | +| `portal.affinity` | Node/Pod affinities | `{}` | +| `portal.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | +| `portal.podAnnotations` | Annotations to add to the portal pod | `{}` | +| `portal.serviceAnnotations` | Annotations to add to the portal service | `{}` | +| `portal.priorityClassName` | The priority class to run the pod as | | +| `portal.initContainers` | Init containers to be run before the controller's container starts. 
| `[]` |
+| **Core** | | |
+| `core.image.repository` | Repository for Harbor core image | `goharbor/harbor-core` |
+| `core.image.tag` | Tag for Harbor core image | `dev` |
+| `core.replicas` | The replica count | `1` |
+| `core.revisionHistoryLimit` | The revision history limit | `10` |
+| `core.startupProbe.initialDelaySeconds` | The initial delay in seconds for the startup probe | `10` |
+| `core.resources` | The [resources] to allocate for container | undefined |
+| `core.automountServiceAccountToken` | Mount serviceAccountToken? | `false` |
+| `core.nodeSelector` | Node labels for pod assignment | `{}` |
+| `core.tolerations` | Tolerations for pod assignment | `[]` |
+| `core.affinity` | Node/Pod affinities | `{}` |
+| `core.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` |
+| `core.podAnnotations` | Annotations to add to the core pod | `{}` |
+| `core.serviceAnnotations` | Annotations to add to the core service | `{}` |
+| `core.configureUserSettings` | A JSON string to set in the environment variable `CONFIG_OVERWRITE_JSON` to configure user settings. See the [official docs](https://goharbor.io/docs/latest/install-config/configure-user-settings-cli/#configure-users-settings-using-an-environment-variable). | |
+| `core.quotaUpdateProvider` | The provider for updating project quota usage; the two options are `redis` and `db`. By default it is implemented by `db`, but you can switch it to `redis`, which can improve the performance of highly concurrent pushes to the same project and reduce spikes in database connections. Using `redis` adds a small delay before the displayed quota usage is updated, so only switch the provider to `redis` if you run into database connection spikes caused by highly concurrent pushes to the same project; there is no improvement for other scenarios. | `db` |
+| `core.secret` | Secret is used when the core server communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | |
+| `core.secretName` | Fill in the name of a Kubernetes secret if you want to use your own TLS certificate and private key for token encryption/decryption. The secret must contain keys named: `tls.crt` - the certificate and `tls.key` - the private key. The default key pair will be used if it isn't set | |
+| `core.tokenKey` | PEM-formatted RSA private key used to sign service tokens. Only used if `core.secretName` is unset. If set, `core.tokenCert` MUST also be set. | |
+| `core.tokenCert` | PEM-formatted certificate signed by `core.tokenKey` used to validate service tokens. Only used if `core.secretName` is unset. If set, `core.tokenKey` MUST also be set. | |
+| `core.xsrfKey` | The XSRF key. Will be generated automatically if it isn't specified | |
+| `core.priorityClassName` | The priority class to run the pod as | |
+| `core.artifactPullAsyncFlushDuration` | The time duration for asynchronously updating artifact pull_time and repository pull_count | |
+| `core.gdpr.deleteUser` | Enable GDPR-compliant user deletion | `false` |
+| `core.gdpr.auditLogsCompliant` | Enable GDPR compliance for audit logs by changing the username to its CRC32 value if that user was deleted from the system | `false` |
+| `core.initContainers` | Init containers to be run before the controller's container starts. 
| `[]` | +| **Jobservice** | | | +| `jobservice.image.repository` | Repository for jobservice image | `goharbor/harbor-jobservice` | +| `jobservice.image.tag` | Tag for jobservice image | `dev` | +| `jobservice.replicas` | The replica count | `1` | +| `jobservice.revisionHistoryLimit` | The revision history limit | `10` | +| `jobservice.maxJobWorkers` | The max job workers | `10` | +| `jobservice.jobLoggers` | The loggers for jobs: `file`, `database` or `stdout` | `[file]` | +| `jobservice.loggerSweeperDuration` | The jobLogger sweeper duration in days (ignored if `jobLoggers` is set to `stdout`) | `14` | +| `jobservice.notification.webhook_job_max_retry` | The maximum retry of webhook sending notifications | `3` | +| `jobservice.notification.webhook_job_http_client_timeout` | The http client timeout value of webhook sending notifications | `3` | +| `jobservice.reaper.max_update_hours` | the max time to wait for a task to finish, if unfinished after max_update_hours, the task will be mark as error, but the task will continue to run, default value is 24 | `24` | +| `jobservice.reaper.max_dangling_hours` | the max time for execution in running state without new task created | `168` | +| `jobservice.resources` | The [resources] to allocate for container | undefined | +| `jobservice.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `jobservice.nodeSelector` | Node labels for pod assignment | `{}` | +| `jobservice.tolerations` | Tolerations for pod assignment | `[]` | +| `jobservice.affinity` | Node/Pod affinities | `{}` | +| `jobservice.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | +| `jobservice.podAnnotations` | Annotations to add to the jobservice pod | `{}` | +| `jobservice.priorityClassName` | The priority class to run the pod as | | +| `jobservice.secret` | Secret is used when job service communicates with other components. If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | +| `jobservice.initContainers` | Init containers to be run before the controller's container starts. | `[]` | +| **Registry** | | | +| `registry.registry.image.repository` | Repository for registry image | `goharbor/registry-photon` | +| `registry.registry.image.tag` | Tag for registry image | `dev` | +| `registry.registry.resources` | The [resources] to allocate for container | undefined | +| `registry.controller.image.repository` | Repository for registry controller image | `goharbor/harbor-registryctl` | +| `registry.controller.image.tag` | Tag for registry controller image | `dev` | +| `registry.controller.resources` | The [resources] to allocate for container | undefined | +| `registry.replicas` | The replica count | `1` | +| `registry.revisionHistoryLimit` | The revision history limit | `10` | +| `registry.nodeSelector` | Node labels for pod assignment | `{}` | +| `registry.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `registry.tolerations` | Tolerations for pod assignment | `[]` | +| `registry.affinity` | Node/Pod affinities | `{}` | +| `registry.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` | +| `registry.middleware` | Middleware is used to add support for a CDN between backend storage and `docker pull` recipient. See [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#middleware). 
| | +| `registry.podAnnotations` | Annotations to add to the registry pod | `{}` | +| `registry.priorityClassName` | The priority class to run the pod as | | +| `registry.secret` | Secret is used to secure the upload state from client and registry storage backend. See [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#http). If a secret key is not specified, Helm will generate one. Must be a string of 16 chars. | | +| `registry.credentials.username` | The username that harbor core uses internally to access the registry instance. Together with the `registry.credentials.password`, a htpasswd is created. This is an alternative to providing `registry.credentials.htpasswdString`. For more details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). | `harbor_registry_user` | +| `registry.credentials.password` | The password that harbor core uses internally to access the registry instance. Together with the `registry.credentials.username`, a htpasswd is created. This is an alternative to providing `registry.credentials.htpasswdString`. For more details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). It is suggested you update this value before installation. | `harbor_registry_password` | +| `registry.credentials.existingSecret` | An existing secret containing the password for accessing the registry instance, which is hosted by htpasswd auth mode. More details see [official docs](https://github.com/docker/distribution/blob/master/docs/configuration.md#htpasswd). The key must be `REGISTRY_PASSWD` | `""` | +| `registry.credentials.htpasswdString` | Login and password in htpasswd string format. Excludes `registry.credentials.username` and `registry.credentials.password`. May come in handy when integrating with tools like argocd or flux. This allows the same line to be generated each time the template is rendered, instead of the `htpasswd` function from helm, which generates different lines each time because of the salt. | undefined | +| `registry.relativeurls` | If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. Needed if harbor is behind a reverse proxy | `false` | +| `registry.upload_purging.enabled` | If true, enable purge \_upload directories | `true` | +| `registry.upload_purging.age` | Remove files in \_upload directories which exist for a period of time, default is one week. | `168h` | +| `registry.upload_purging.interval` | The interval of the purge operations | `24h` | +| `registry.upload_purging.dryrun` | If true, enable dryrun for purging \_upload, default false | `false` | +| `registry.initContainers` | Init containers to be run before the controller's container starts. | `[]` | +| **[Trivy][trivy]** | | | +| `trivy.enabled` | The flag to enable Trivy scanner | `true` | +| `trivy.image.repository` | Repository for Trivy adapter image | `goharbor/trivy-adapter-photon` | +| `trivy.image.tag` | Tag for Trivy adapter image | `dev` | +| `trivy.resources` | The [resources] to allocate for Trivy adapter container | | +| `trivy.automountServiceAccountToken` | Mount serviceAccountToken? | `false` | +| `trivy.replicas` | The number of Pod replicas | `1` | +| `trivy.debugMode` | The flag to enable Trivy debug mode | `false` | +| `trivy.vulnType` | Comma-separated list of vulnerability types. Possible values `os` and `library`. 
| `os,library` |
+| `trivy.severity` | Comma-separated list of severities to be checked | `UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL` |
+| `trivy.ignoreUnfixed` | The flag to display only fixed vulnerabilities | `false` |
+| `trivy.insecure` | The flag to skip verifying registry certificate | `false` |
+| `trivy.skipUpdate` | The flag to disable [Trivy DB][trivy-db] downloads from GitHub | `false` |
+| `trivy.skipJavaDBUpdate` | If this flag is enabled, you have to manually download the `trivy-java.db` file ([Trivy Java DB][trivy-java-db]) and mount it at the `/home/scanner/.cache/trivy/java-db/trivy-java.db` path | `false` |
+| `trivy.dbRepository` | OCI repository to retrieve the Trivy vulnerability database from | `ghcr.io/aquasecurity/trivy-db` |
+| `trivy.javaDBRepository` | OCI repository to retrieve the Trivy Java vulnerability database from | `ghcr.io/aquasecurity/trivy-java-db` |
+| `trivy.offlineScan` | The flag prevents Trivy from sending API requests to identify dependencies. | `false` |
+| `trivy.securityCheck` | Comma-separated list of what security issues to detect. | `vuln` |
+| `trivy.timeout` | The duration to wait for scan completion | `5m0s` |
+| `trivy.gitHubToken` | The GitHub access token to download [Trivy DB][trivy-db] (see [GitHub rate limiting][trivy-rate-limiting]) | |
+| `trivy.priorityClassName` | The priority class to run the pod as | |
+| `trivy.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | |
+| `trivy.initContainers` | Init containers to be run before the controller's container starts. | `[]` |
+| **Database** | | |
+| `database.type` | If external database is used, set it to `external` | `internal` |
+| `database.internal.image.repository` | Repository for database image | `goharbor/harbor-db` |
+| `database.internal.image.tag` | Tag for database image | `dev` |
+| `database.internal.password` | The password for database | `changeit` |
+| `database.internal.shmSizeLimit` | The limit for the size of shared memory for internal PostgreSQL, conventionally it's around 50% of the memory limit of the container | `512Mi` |
+| `database.internal.resources` | The [resources] to allocate for container | undefined |
+| `database.internal.automountServiceAccountToken` | Mount serviceAccountToken? | `false` |
+| `database.internal.initContainer.migrator.resources` | The [resources] to allocate for the database migrator initContainer | undefined |
+| `database.internal.initContainer.permissions.resources` | The [resources] to allocate for the database permissions initContainer | undefined |
+| `database.internal.nodeSelector` | Node labels for pod assignment | `{}` |
+| `database.internal.tolerations` | Tolerations for pod assignment | `[]` |
+| `database.internal.affinity` | Node/Pod affinities | `{}` |
+| `database.internal.priorityClassName` | The priority class to run the pod as | |
+| `database.internal.livenessProbe.timeoutSeconds` | The timeout used in liveness probe; 1 to 5 seconds | 1 |
+| `database.internal.readinessProbe.timeoutSeconds` | The timeout used in readiness probe; 1 to 5 seconds | 1 |
+| `database.internal.extrInitContainers` | Extra init containers to be run before the database's container starts. | `[]` |
+| `database.external.host` | The hostname of external database | `192.168.0.1` |
+| `database.external.port` | The port of external database | `5432` |
+| `database.external.username` | The username of external database | `user` |
+| `database.external.password` | The password of external database | `password` |
+| `database.external.coreDatabase` | The database used by core service | `registry` |
+| `database.external.existingSecret` | An existing secret containing the database password. The key must be `password`. | `""` |
+| `database.external.sslmode` | Connection method of external database (require, verify-full, verify-ca, disable) | `disable` |
+| `database.maxIdleConns` | The maximum number of connections in the idle connection pool. If it is <= 0, no idle connections are retained. | `50` |
+| `database.maxOpenConns` | The maximum number of open connections to the database. If it is <= 0, then there is no limit on the number of open connections. | `100` |
+| `database.podAnnotations` | Annotations to add to the database pod | `{}` |
+| **Redis** | | |
+| `redis.type` | If external redis is used, set it to `external` | `internal` |
+| `redis.internal.image.repository` | Repository for redis image | `goharbor/redis-photon` |
+| `redis.internal.image.tag` | Tag for redis image | `dev` |
+| `redis.internal.resources` | The [resources] to allocate for container | undefined |
+| `redis.internal.automountServiceAccountToken` | Mount serviceAccountToken? | `false` |
+| `redis.internal.nodeSelector` | Node labels for pod assignment | `{}` |
+| `redis.internal.tolerations` | Tolerations for pod assignment | `[]` |
+| `redis.internal.affinity` | Node/Pod affinities | `{}` |
+| `redis.internal.priorityClassName` | The priority class to run the pod as | |
+| `redis.internal.jobserviceDatabaseIndex` | The database index for jobservice | `1` |
+| `redis.internal.registryDatabaseIndex` | The database index for registry | `2` |
+| `redis.internal.trivyAdapterIndex` | The database index for trivy adapter | `5` |
+| `redis.internal.harborDatabaseIndex` | The database index for harbor miscellaneous business logic | `0` |
+| `redis.internal.cacheLayerDatabaseIndex` | The database index for harbor cache layer | `0` |
+| `redis.internal.initContainers` | Init containers to be run before the redis container starts. | `[]` |
+| `redis.external.addr` | The address of external Redis: `<host>:<port>`. When using Sentinel, it should be a comma-separated list such as `<host1>:<port1>,<host2>:<port2>,<host3>:<port3>` | `192.168.0.2:6379` |
+| `redis.external.sentinelMasterSet` | The name of the set of Redis instances to monitor | |
+| `redis.external.coreDatabaseIndex` | The database index for core | `0` |
+| `redis.external.jobserviceDatabaseIndex` | The database index for jobservice | `1` |
+| `redis.external.registryDatabaseIndex` | The database index for registry | `2` |
+| `redis.external.trivyAdapterIndex` | The database index for trivy adapter | `5` |
+| `redis.external.harborDatabaseIndex` | The database index for harbor miscellaneous business logic | `0` |
+| `redis.external.cacheLayerDatabaseIndex` | The database index for harbor cache layer | `0` |
+| `redis.external.username` | The username of external Redis | |
+| `redis.external.password` | The password of external Redis | |
+| `redis.external.existingSecret` | Use an existing secret to connect to redis. The key must be `REDIS_PASSWORD`. | `""` |
+| `redis.podAnnotations` | Annotations to add to the redis pod | `{}` |
+| **Exporter** | | |
+| `exporter.replicas` | The replica count | `1` |
+| `exporter.revisionHistoryLimit` | The revision history limit | `10` |
+| `exporter.podAnnotations` | Annotations to add to the exporter pod | `{}` |
+| `exporter.image.repository` | Repository for exporter image | `goharbor/harbor-exporter` |
+| `exporter.image.tag` | Tag for exporter image | `dev` |
+| `exporter.nodeSelector` | Node labels for pod assignment | `{}` |
+| `exporter.tolerations` | Tolerations for pod assignment | `[]` |
+| `exporter.affinity` | Node/Pod affinities | `{}` |
+| `exporter.topologySpreadConstraints` | Constraints that define how Pods are spread across failure-domains like regions or availability zones | `[]` |
+| `exporter.automountServiceAccountToken` | Mount serviceAccountToken? | `false` |
+| `exporter.cacheDuration` | The cache duration for information that the exporter collects from Harbor | `30` |
+| `exporter.cacheCleanInterval` | The cache clean interval for information that the exporter collects from Harbor | `14400` |
+| `exporter.priorityClassName` | The priority class to run the pod as | |
+| **Metrics** | | |
+| `metrics.enabled` | Whether to enable Harbor metrics | `false` |
+| `metrics.core.path` | The URL path for core metrics | `/metrics` |
+| `metrics.core.port` | The port for core metrics | `8001` |
+| `metrics.registry.path` | The URL path for registry metrics | `/metrics` |
+| `metrics.registry.port` | The port for registry metrics | `8001` |
+| `metrics.exporter.path` | The URL path for exporter metrics | `/metrics` |
+| `metrics.exporter.port` | The port for exporter metrics | `8001` |
+| `metrics.serviceMonitor.enabled` | Create a Prometheus ServiceMonitor. Requires the Prometheus CRDs | `false` |
+| `metrics.serviceMonitor.additionalLabels` | Additional labels to upsert to the manifest | `""` |
+| `metrics.serviceMonitor.interval` | The scrape interval for Harbor metrics | `""` |
+| `metrics.serviceMonitor.metricRelabelings` | Metric relabelings to add/modify/delete before ingestion | `[]` |
+| `metrics.serviceMonitor.relabelings` | Relabelings to add/modify/delete to samples before scraping | `[]` |
+| **Trace** | | |
+| `trace.enabled` | Enable tracing or not | `false` |
+| `trace.provider` | The tracing provider: `jaeger` or `otel`. 
`jaeger` should be 1.26+ | `jaeger` | +| `trace.sample_rate` | Set `sample_rate` to 1 if you want sampling 100% of trace data; set 0.5 if you want sampling 50% of trace data, and so forth | `1` | +| `trace.namespace` | Namespace used to differentiate different harbor services | | +| `trace.attributes` | `attributes` is a key value dict contains user defined attributes used to initialize trace provider | | +| `trace.jaeger.endpoint` | The endpoint of jaeger | `http://hostname:14268/api/traces` | +| `trace.jaeger.username` | The username of jaeger | | +| `trace.jaeger.password` | The password of jaeger | | +| `trace.jaeger.agent_host` | The agent host of jaeger | | +| `trace.jaeger.agent_port` | The agent port of jaeger | `6831` | +| `trace.otel.endpoint` | The endpoint of otel | `hostname:4318` | +| `trace.otel.url_path` | The URL path of otel | `/v1/traces` | +| `trace.otel.compression` | Whether enable compression or not for otel | `false` | +| `trace.otel.insecure` | Whether establish insecure connection or not for otel | `true` | +| `trace.otel.timeout` | The timeout in seconds of otel | `10` | +| **Cache** | | | +| `cache.enabled` | Enable cache layer or not | `false` | +| `cache.expireHours` | The expire hours of cache layer | `24` | [resources]: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ [trivy]: https://github.com/aquasecurity/trivy diff --git a/templates/trivy/trivy-sts.yaml b/templates/trivy/trivy-sts.yaml index 7e34ee9c7..781192dbb 100644 --- a/templates/trivy/trivy-sts.yaml +++ b/templates/trivy/trivy-sts.yaml @@ -102,6 +102,10 @@ spec: value: {{ .Values.trivy.skipUpdate | default false | quote }} - name: "SCANNER_TRIVY_SKIP_JAVA_DB_UPDATE" value: {{ .Values.trivy.skipJavaDBUpdate | default false | quote }} + - name: "SCANNER_TRIVY_DB_REPOSITORY" + value: {{ .Values.trivy.dbRepository | quote }} + - name: "SCANNER_TRIVY_JAVA_DB_REPOSITORY" + value: {{ .Values.trivy.javaDBRepository | quote }} - name: "SCANNER_TRIVY_OFFLINE_SCAN" value: {{ .Values.trivy.offlineScan | default false | quote }} - name: "SCANNER_TRIVY_SECURITY_CHECKS" diff --git a/values.yaml b/values.yaml index c862337d0..a80f866d5 100644 --- a/values.yaml +++ b/values.yaml @@ -863,9 +863,12 @@ trivy: # If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the # `/home/scanner/.cache/trivy/db/trivy.db` path. skipUpdate: false + # OCI repository to retrieve the trivy vulnerability database from. + dbRepository: "ghcr.io/aquasecurity/trivy-db" + # OCI repository to retrieve the Java trivy vulnerability database from. + javaDBRepository: "ghcr.io/aquasecurity/trivy-java-db" # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path - # skipJavaDBUpdate: false # The offlineScan option prevents Trivy from sending API requests to identify dependencies. #
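
The `trivy.dbRepository` and `trivy.javaDBRepository` values introduced by this patch are passed to the adapter as the `SCANNER_TRIVY_DB_REPOSITORY` and `SCANNER_TRIVY_JAVA_DB_REPOSITORY` environment variables. As a minimal sketch of how they might be overridden, the following values file points the scanner at a hypothetical internal mirror; `registry.example.com` and the repository paths are placeholders, not part of the chart:

```yaml
# values-trivy-mirror.yaml -- hypothetical override, adjust to your environment
trivy:
  enabled: true
  # Pull the vulnerability databases from a private mirror instead of ghcr.io
  dbRepository: "registry.example.com/mirror/aquasecurity/trivy-db"
  javaDBRepository: "registry.example.com/mirror/aquasecurity/trivy-java-db"
  # Keep automatic DB updates enabled so the adapter refreshes from the mirror
  skipUpdate: false
  skipJavaDBUpdate: false
```

The override can then be applied with, for example, `helm upgrade --install my-release harbor/harbor -f values-trivy-mirror.yaml`.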
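
Similarly, the `database.external.*` and `redis.external.*` parameters in the table above can be combined into an override for externally managed PostgreSQL and Redis. A sketch under assumed placeholder host names and secret names; the referenced secrets must already exist in the release namespace and use the keys noted in the table:

```yaml
# Hypothetical external database and Redis configuration
database:
  type: external
  external:
    host: "postgres.example.internal"   # placeholder hostname
    port: "5432"
    username: "harbor"
    coreDatabase: "registry"
    existingSecret: "harbor-db-secret"  # secret key must be `password`
    sslmode: "require"

redis:
  type: external
  external:
    addr: "redis.example.internal:6379"    # placeholder <host>:<port>
    existingSecret: "harbor-redis-secret"  # secret key must be `REDIS_PASSWORD`
```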
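
Finally, the **Metrics** parameters can be enabled with a small override as well. A sketch, assuming the Prometheus Operator CRDs (for `ServiceMonitor`) are already installed in the cluster; the `additionalLabels` entry is a placeholder that should match your Prometheus instance's selector:

```yaml
# Hypothetical metrics configuration
metrics:
  enabled: true
  core:
    path: /metrics
    port: 8001
  serviceMonitor:
    enabled: true          # requires the Prometheus ServiceMonitor CRD
    interval: "30s"
    additionalLabels:
      release: prometheus  # placeholder, match your Prometheus selector
```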