diff --git a/.ci/scripts/generate-portainer-manifests-from-chart.sh b/.ci/scripts/generate-portainer-manifests-from-chart.sh index 2897292..2f896b3 100755 --- a/.ci/scripts/generate-portainer-manifests-from-chart.sh +++ b/.ci/scripts/generate-portainer-manifests-from-chart.sh @@ -15,7 +15,8 @@ # 2. Remove the header produced by helf --dry-run # 3. Remove references to helm in rendered manifests (no point attaching a label like "app.kubernetes.io/managed-by: Helm" if we are not!) -helm install --no-hooks --namespace zorgburger --set disableTest=true --dry-run zorgburger charts/portainer \ +# Create nodeport manifest for ce +helm install --no-hooks --namespace zorgburger --set service.type=NodePort --set disableTest=true --set createNamespace=true --dry-run zorgburger charts/portainer \ | sed -n '1,/NOTES/p' | sed \$d \ | grep -vE 'NAME|LAST DEPLOYED|NAMESPACE|STATUS|REVISION|HOOKS|MANIFEST|TEST SUITE' \ | grep -iv helm \ @@ -23,10 +24,30 @@ helm install --no-hooks --namespace zorgburger --set disableTest=true --dry-run | sed 's/portainer-portainer/portainer/' \ > deploy/manifests/portainer/portainer.yaml -helm install --no-hooks --namespace zorgburger --set service.type=LoadBalancer --set disableTest=true --dry-run zorgburger charts/portainer \ + +# Create lb manifest for ce +helm install --no-hooks --namespace zorgburger --set service.type=LoadBalancer --set disableTest=true --set createNamespace=true --dry-run zorgburger charts/portainer \ | sed -n '1,/NOTES/p' | sed \$d \ | grep -vE 'NAME|LAST DEPLOYED|NAMESPACE|STATUS|REVISION|HOOKS|MANIFEST|TEST SUITE' \ | grep -iv helm \ | sed 's/zorgburger/portainer/' \ | sed 's/portainer-portainer/portainer/' \ > deploy/manifests/portainer/portainer-lb.yaml + +# Create nodeport manifest for ee +helm install --no-hooks --namespace zorgburger --set enterpriseEdition.enabled=true --set service.type=NodePort --set disableTest=true --set createNamespace=true --dry-run zorgburger charts/portainer \ +| sed -n '1,/NOTES/p' | sed \$d \ +| grep -vE 'NAME|LAST DEPLOYED|NAMESPACE|STATUS|REVISION|HOOKS|MANIFEST|TEST SUITE' \ +| grep -iv helm \ +| sed 's/zorgburger/portainer/' \ +| sed 's/portainer-portainer/portainer/' \ +> deploy/manifests/portainer/portainer-ee.yaml + +# Create lb manifest for ee +helm install --no-hooks --namespace zorgburger --set enterpriseEdition.enabled=true --set service.type=LoadBalancer --set disableTest=true --set createNamespace=true --dry-run zorgburger charts/portainer \ +| sed -n '1,/NOTES/p' | sed \$d \ +| grep -vE 'NAME|LAST DEPLOYED|NAMESPACE|STATUS|REVISION|HOOKS|MANIFEST|TEST SUITE' \ +| grep -iv helm \ +| sed 's/zorgburger/portainer/' \ +| sed 's/portainer-portainer/portainer/' \ +> deploy/manifests/portainer/portainer-lb-ee.yaml diff --git a/.ci/scripts/local-ct-lint.sh b/.ci/scripts/local-ct-lint.sh old mode 100755 new mode 100644 diff --git a/.ci/scripts/local-kube-score.sh b/.ci/scripts/local-kube-score.sh old mode 100755 new mode 100644 diff --git a/.github/workflows/on-push-lint-charts.yml b/.github/workflows/on-push-lint-charts.yml index 4e73450..603a48a 100644 --- a/.github/workflows/on-push-lint-charts.yml +++ b/.github/workflows/on-push-lint-charts.yml @@ -4,7 +4,15 @@ on: push: paths: - 'charts/**' - - '.github/**' + - '.github/**' + pull_request: + branches: + - master + workflow_dispatch: + +env: + KUBE_SCORE_VERSION: 1.10.0 + HELM_VERSION: v3.4.1 jobs: lint-test: @@ -13,22 +21,83 @@ jobs: - name: Checkout uses: actions/checkout@v1 + - name: Set up Helm + uses: azure/setup-helm@v1 + with: + version: ${{ 
env.HELM_VERSION }} + + - name: Set up kube-score + run: | + wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score + chmod 755 kube-score + + - name: Kube-score generated manifests + run: helm template charts/* | ./kube-score score - + --ignore-test pod-networkpolicy + --ignore-test deployment-has-poddisruptionbudget + --ignore-test deployment-has-host-podantiaffinity + --ignore-test container-security-context + --ignore-test container-resources + --ignore-test pod-probes + --ignore-test container-image-tag + --enable-optional-test container-security-context-privileged + + # python is a requirement for the chart-testing action below (supports yamllint among other tests) + - uses: actions/setup-python@v2 + with: + python-version: 3.13.1 + + - name: Set up chart-testing + uses: helm/chart-testing-action@v2.0.1 + + - name: Run chart-testing (list-changed) + id: list-changed + run: | + changed=$(ct list-changed --config .ci/ct-config.yaml) + if [[ -n "$changed" ]]; then + echo "::set-output name=changed::true" + fi + - name: Run chart-testing (lint) - id: lint - uses: helm/chart-testing-action@v1.0.0 + run: ct lint --config .ci/ct-config.yaml + + # Refer to https://github.com/kubernetes-sigs/kind/releases when updating the node_images + - name: Create 1.28 kind cluster + uses: helm/kind-action@v1.4.0 + with: + node_image: kindest/node:v1.28.13@sha256:45d319897776e11167e4698f6b14938eb4d52eb381d9e3d7a9086c16c69a8110 + cluster_name: kubernetes-1.28 + if: steps.list-changed.outputs.changed == 'true' + + - name: Run chart-testing (install) against 1.28 + run: ct install --config .ci/ct-config.yaml + + - name: Create 1.29 kind cluster + uses: helm/kind-action@v1.4.0 with: - config: .ci/ct-config.yaml - command: lint + node_image: kindest/node:v1.29.8@sha256:d46b7aa29567e93b27f7531d258c372e829d7224b25e3fc6ffdefed12476d3aa + cluster_name: kubernetes-1.29 + if: steps.list-changed.outputs.changed == 'true' + + - name: Run chart-testing (install) against 1.29 + run: ct install --config .ci/ct-config.yaml - - name: Create kind cluster - uses: helm/kind-action@v1.0.0 + - name: Create 1.30 kind cluster + uses: helm/kind-action@v1.4.0 with: - install_local_path_provisioner: true - # Only build a kind cluster if there are chart changes to test. 
- if: steps.lint.outputs.changed == 'true' + node_image: kindest/node:v1.30.4@sha256:976ea815844d5fa93be213437e3ff5754cd599b040946b5cca43ca45c2047114 + cluster_name: kubernetes-1.30 + if: steps.list-changed.outputs.changed == 'true' - - name: Run chart-testing (install) - uses: helm/chart-testing-action@v1.0.0 + - name: Run chart-testing (install) against 1.30 + run: ct install --config .ci/ct-config.yaml + + - name: Create 1.31 kind cluster + uses: helm/kind-action@v1.4.0 with: - command: install - config: .ci/ct-config.yaml + node_image: kindest/node:v1.31.0@sha256:53df588e04085fd41ae12de0c3fe4c72f7013bba32a20e7325357a1ac94ba865 + cluster_name: kubernetes-1.31 + if: steps.list-changed.outputs.changed == 'true' + + - name: Run chart-testing (install) against 1.31 + run: ct install --config .ci/ct-config.yaml \ No newline at end of file diff --git a/.github/workflows/on-push-master-publish-chart.yml b/.github/workflows/on-push-master-publish-chart.yml index a6f52d5..e607731 100644 --- a/.github/workflows/on-push-master-publish-chart.yml +++ b/.github/workflows/on-push-master-publish-chart.yml @@ -6,7 +6,8 @@ on: - master paths: - 'charts/**' - - '.github/**' + - '.github/**' + - 'deploy/manifests/**' jobs: build: @@ -25,6 +26,13 @@ jobs: git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - name: Run chart-releaser - uses: helm/chart-releaser-action@v1.0.0 + uses: helm/chart-releaser-action@v1.1.0 env: - CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" \ No newline at end of file + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + + - name: sync gh-pages branch + uses: repo-sync/pull-request@v2 + with: + destination_branch: "gh-pages" + github_token: "${{ secrets.GITHUB_TOKEN }}" + pr_allow_empty: false \ No newline at end of file diff --git a/README.md b/README.md index 2ee75b0..6497ab7 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,30 @@ -# Portainer Kubernetes Deployment -This repo contains helm and YAML (yuch) for deploying Portainer into a Kubernetes environment +This repo contains helm and YAML for deploying Portainer into a Kubernetes environment. 
Follow the applicable instructions for your edition / deployment methodology below: +- [Deploying with Helm](#deploying-with-helm) + - [Community Edition](#community-edition) + - [Using NodePort on a local/remote cluster](#using-nodeport-on-a-localremote-cluster) + - [Using a cloud provider's loadbalancer](#using-a-cloud-providers-loadbalancer) + - [Using ClusterIP with an ingress](#using-clusterip-with-an-ingress) + - [Enterprise Edition](#enterprise-edition) + - [Using NodePort on a local/remote cluster](#using-nodeport-on-a-localremote-cluster-1) + - [Using a cloud provider's loadbalancer](#using-a-cloud-providers-loadbalancer-1) + - [Using ClusterIP with an ingress](#using-clusterip-with-an-ingress-1) +- [Deploying with manifests](#deploying-with-manifests) + - [Community Edition](#community-edition-1) + - [Using NodePort on a local/remote cluster](#using-nodeport-on-a-localremote-cluster-2) + - [Using a cloud provider's loadbalancer](#using-a-cloud-providers-loadbalancer-2) + - [Enterprise Edition](#enterprise-edition-1) + - [Using NodePort on a local/remote cluster](#using-nodeport-on-a-localremote-cluster-3) + - [Using a cloud provider's loadbalancer](#using-a-cloud-providers-loadbalancer-3) +- [Note re persisting data](#note-re-persisting-data) -## Quickstart with Helm + + + +# Deploying with Helm + +Ensure you're using at least helm v3.2, which [includes support](https://github.com/helm/helm/pull/7648) for the `--create-namespace` argument. Install the repository: @@ -12,55 +33,114 @@ helm repo add portainer https://portainer.github.io/k8s/ helm repo update ``` -Create the portainer namespace: +## Community Edition + +Install the helm chart: + +### Using NodePort on a local/remote cluster ``` -kubectl create namespace portainer +helm install --create-namespace -n portainer portainer portainer/portainer ``` -Install the helm chart: +### Using a cloud provider's loadbalancer + +``` +helm install --create-namespace -n portainer portainer portainer/portainer \ +--set service.type=LoadBalancer +``` + + +### Using ClusterIP with an ingress + +``` +helm install --create-namespace -n portainer portainer portainer/portainer \ +--set service.type=ClusterIP +``` + +For advanced helm customization, see the [chart README](/charts/portainer/README.md) + +## Enterprise Edition ### Using NodePort on a local/remote cluster ``` -helm install -n portainer portainer portainer/portainer +helm install --create-namespace -n portainer portainer portainer/portainer \ +--set enterpriseEdition.enabled=true ``` ### Using a cloud provider's loadbalancer ``` -helm install -n portainer portainer portainer/portainer --set service.type=LoadBalancer +helm install --create-namespace -n portainer portainer portainer/portainer \ +--set enterpriseEdition.enabled=true \ +--set service.type=LoadBalancer ``` ### Using ClusterIP with an ingress ``` -helm install -n portainer portainer portainer/portainer --set service.type=ClusterIP +helm install --create-namespace -n portainer portainer portainer/portainer \ +--set enterpriseEdition.enabled=true \ +--set service.type=ClusterIP ``` For advanced helm customization, see the [chart README](/charts/portainer/README.md) -## Quickstart with manifests +# Deploying with manifests -If you're not into helm, you can install Portainer using manifests, by first creating the portainer namespace: +If you're not using helm, you can install Portainer using manifests directly, as follows + +## Community Edition + +### Using NodePort on a local/remote cluster ``` -kubectl create namespace 
portainer +kubectl apply -f https://raw.githubusercontent.com/portainer/k8s/master/deploy/manifests/portainer/portainer.yaml ``` -And then... +### Using a cloud provider's loadbalancer + +``` +kubectl apply -f https://raw.githubusercontent.com/portainer/k8s/master/deploy/manifests/portainer/portainer-lb.yaml +``` + +## Enterprise Edition ### Using NodePort on a local/remote cluster ``` -kubectl create namespace portainer -kubectl apply -n portainer -f https://portainer.github.io/k8s//deploy/manifests/portainer/portainer.yaml +kubectl apply -f https://raw.githubusercontent.com/portainer/k8s/master/deploy/manifests/portainer/portainer-ee.yaml ``` ### Using a cloud provider's loadbalancer ``` -kubectl create namespace portainer -kubectl apply -n portainer -f https://portainer.github.io/k8s//deploy/manifests/portainer/portainer-lb.yaml +kubectl apply -f https://raw.githubusercontent.com/portainer/k8s/master/deploy/manifests/portainer/portainer-lb-ee.yaml +``` + +# Note re persisting data + +The charts/manifests will create a persistent volume for storing Portainer data, using the default StorageClass. + +In some Kubernetes clusters (microk8s), the default Storage Class simply creates hostPath volumes, which are not explicitly tied to a particular node. In a multi-node cluster, this can create an issue when the pod is terminated and rescheduled on a different node, "leaving" all the persistent data behind and starting the pod with an "empty" volume. + +While this behaviour is inherently a limitation of using hostPath volumes, a suitable workaround is to add a nodeSelector to the deployment, which effectively "pins" the portainer pod to a particular node. + +The nodeSelector can be added in the following ways: + +1. Edit your own values.yaml and set the value of nodeSelector like this: + +``` +nodeSelector: + kubernetes.io/hostname: +``` + +2. Explicitly set the target node when deploying/updating the helm chart on the CLI, by including `--set nodeSelector.kubernetes.io/hostname=` + +3. If you've deployed Portainer via manifests, without Helm, run the following one-liner to "patch" the deployment, forcing the pod to always be scheduled on the node it's currently running on: + +``` +kubectl patch deployments -n portainer portainer -p '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$(kubectl get pods -n portainer -o jsonpath='{ ..nodeName }')'"}}}}}' || (echo Failed to identify current node of portainer pod; exit 1) ``` \ No newline at end of file diff --git a/charts/portainer/Chart.yaml b/charts/portainer/Chart.yaml index 8c03c18..29df7d2 100644 --- a/charts/portainer/Chart.yaml +++ b/charts/portainer/Chart.yaml @@ -16,16 +16,16 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 1.0.1 +version: 1.0.59 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 2.0.0 +appVersion: ce-latest-ee-2.21.5 sources: - https://github.com/portainer/k8s maintainers: - - name: funkypenguin - email: davidy@funkypenguin.co.nz - url: https://www.funkypenguin.co.nz \ No newline at end of file + - name: Portainer + email: platform-team@portainer.io + url: https://www.portainer.io diff --git a/charts/portainer/README.md b/charts/portainer/README.md index f70333d..7c600d5 100644 --- a/charts/portainer/README.md +++ b/charts/portainer/README.md @@ -61,26 +61,33 @@ The following table lists the configurable parameters of the Portainer chart and | `image.tag` | Tag for the Portainer image | `latest` | | `image.pullPolicy` | Portainer image pulling policy | `IfNotPresent` | | `imagePullSecrets` | If Portainer image requires to be in a private repository | `nil` | +| `nodeSelector` | Used to apply a nodeSelector to the deployment | `{}` | | `serviceAccount.annotations` | Annotations to add to the service account | `null` | | `serviceAccount.name` | The name of the service account to use | `portainer-sa-clusteradmin` | +| `localMgmt` | Enables or disables the creation of SA, Roles in local cluster where Portainer runs, only change when you don't need to manage the local cluster through this Portainer instance | `true` | | `service.type` | Service Type for the main Portainer Service; ClusterIP, NodePort and LoadBalancer | `LoadBalancer` | | `service.httpPort` | HTTP port for accessing Portainer Web | `9000` | | `service.httpNodePort` | Static NodePort for accessing Portainer Web. Specify only if the type is NodePort | `30777` | | `service.edgePort` | TCP port for accessing Portainer Edge | `8000` | | `service.edgeNodePort` | Static NodePort for accessing Portainer Edge. Specify only if the type is NodePort | `30776` | +| `service.annotations` | Annotations to add to the service | `{}` | +| `feature.flags` | Enable one or more features separated by spaces. For instance, `--feat=open-amt` | `nil` | | `ingress.enabled` | Create an ingress for Portainer | `false` | +| `ingress.ingressClassName` | For Kubernetes >= 1.18 you should specify the ingress-controller via the field `ingressClassName`. For instance, `nginx` | `nil` | | `ingress.annotations` | Annotations to add to the ingress. For instane, `kubernetes.io/ingress.class: nginx` | `{}` | | `ingress.hosts.host` | URL for Portainer Web. For instance, `portainer.example.io` | `nil` | | `ingress.hosts.paths.path` | Path for the Portainer Web. | `/` | | `ingress.hosts.paths.port` | Port for the Portainer Web. | `9000` | | `ingress.tls` | TLS support on ingress. 
Must create a secret with TLS certificates in advance | `[]` | | `resources` | Portainer resource requests and limits | `{}` | +| `tls.force` | Force Portainer to be configured to use TLS only | `false` | +| `tls.existingSecret` | Mount the existing TLS secret into the pod | `""` | +| `mtls.enable` | Option to specify mTLS certs to be used by Portainer | `false` | +| `mtls.existingSecret` | Mount the existing mTLS secret into the pod | `""` | | `persistence.enabled` | Whether to enable data persistence | `true` | | `persistence.existingClaim` | Name of an existing PVC to use for data persistence | `nil` | -| `persistence.size` | Size of the PVC used for persistence | `1Gi` | +| `persistence.size` | Size of the PVC used for persistence | `10Gi` | | `persistence.annotations` | Annotations to apply to PVC used for persistence | `{}` | | `persistence.storageClass` | StorageClass to apply to PVC used for persistence | `default` | | `persistence.accessMode` | AccessMode for persistence | `ReadWriteOnce` | -| `persistence.selector` | Selector for persistence | `nil` | - - +| `persistence.selector` | Selector for persistence | `nil` | \ No newline at end of file diff --git a/charts/portainer/templates/NOTES.txt b/charts/portainer/templates/NOTES.txt index b5527ac..afc6d1e 100644 --- a/charts/portainer/templates/NOTES.txt +++ b/charts/portainer/templates/NOTES.txt @@ -1,21 +1,27 @@ -1. Get the application URL by running these commands: {{- if .Values.ingress.enabled }} {{- range $host := .Values.ingress.hosts }} {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ if .port }}:{{ .port }}{{ else }}:9000{{ end }}{{.path}} + Use the URL below to access the application + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ if .port }}:{{ .port }}{{ else }}{{ end }}{{.path}} {{- end }} {{- end }} {{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "portainer.fullname" . }}) + Get the application URL by running these commands: + {{- if .Values.tls.force }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "portainer.fullname" . }}) + {{- else }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "portainer.fullname" . }}) + {{- end}} export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT + echo https://$NODE_IP:$NODE_PORT {{- else if contains "LoadBalancer" .Values.service.type }} + Get the application URL by running these commands: NOTE: It may take a few minutes for the LoadBalancer IP to be available. You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "portainer.fullname" . }}' export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "portainer.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.httpPort }} + echo https://$SERVICE_IP:{{ .Values.service.httpsPort }} {{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "portainer.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - echo "Visit http://127.0.0.1:9000 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9000:9000 + Get the application URL by running these commands: + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "portainer.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:9443 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9443:9443 {{- end }} diff --git a/charts/portainer/templates/_helpers.tpl b/charts/portainer/templates/_helpers.tpl index 893e401..b428447 100644 --- a/charts/portainer/templates/_helpers.tpl +++ b/charts/portainer/templates/_helpers.tpl @@ -71,4 +71,17 @@ Provide a pre-defined claim or a claim based on the Release {{- else -}} {{- template "portainer.fullname" . }} {{- end -}} +{{- end -}} + +{{/* +Generate the right Ingress apiVersion +*/}} +{{- define "ingress.apiVersion" -}} +{{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.GitVersion -}} +networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +networking.k8s.io/v1beta1 +{{- else -}} +extensions/v1beta1 +{{- end }} {{- end -}} \ No newline at end of file diff --git a/charts/portainer/templates/deployment.yaml b/charts/portainer/templates/deployment.yaml index f743a5e..310aed9 100644 --- a/charts/portainer/templates/deployment.yaml +++ b/charts/portainer/templates/deployment.yaml @@ -18,39 +18,181 @@ spec: labels: {{- include "portainer.selectorLabels" . | nindent 8 }} spec: - {{- with .Values.imagePullSecrets }} + nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 }} + tolerations: {{- toYaml .Values.tolerations | nindent 8 -}} + {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 8 }} - {{- end }} + {{- end }} + {{- if .Values.localMgmt }} serviceAccountName: {{ include "portainer.serviceAccountName" . }} + {{- end }} volumes: - - name: "data" - persistentVolumeClaim: - claimName: {{ template "portainer.pvcName" . }} + {{- if .Values.persistence.enabled }} + - name: "data" + persistentVolumeClaim: + claimName: {{ template "portainer.pvcName" . 
}} + {{- end }} + {{- if .Values.tls.existingSecret }} + - name: certs + secret: + secretName: {{ .Values.tls.existingSecret }} + {{- end }} + {{- if .Values.mtls.existingSecret }} + - name: mtlscerts + secret: + secretName: {{ .Values.mtls.existingSecret }} + {{- end }} containers: - name: {{ .Chart.Name }} + {{- if .Values.enterpriseEdition.enabled }} + image: "{{ .Values.enterpriseEdition.image.repository }}:{{ .Values.enterpriseEdition.image.tag }}" + imagePullPolicy: {{ .Values.enterpriseEdition.image.pullPolicy }} + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.edgeNodePort))) }} - args: [ '--tunnel-port','{{ .Values.service.edgeNodePort }}' ] - {{- end }} + {{- end }} + args: + {{- if .Values.tls.force }} + - --http-disabled + {{- end }} + {{- if .Values.tls.existingSecret }} + - --sslcert=/certs/tls.crt + - --sslkey=/certs/tls.key + {{- end }} + {{- if .Values.mtls.existingSecret }} + - --mtlscacert=/certs/mtls/mtlsca.crt + - --mtlscert=/certs/mtls/mtlscert.crt + - --mtlskey=/certs/mtls/mtlskey.key + {{- end }} + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.edgeNodePort))) }} + - '--tunnel-port={{ .Values.service.edgeNodePort }}' + {{- end }} + {{- range .Values.feature.flags }} + - {{ . | squote }} + {{- end }} volumeMounts: + {{- if .Values.persistence.enabled }} - name: data mountPath: /data + {{- end }} + {{- if .Values.tls.existingSecret }} + - name: certs + mountPath: /certs + readOnly: true + {{- end }} + {{- if .Values.mtls.existingSecret }} + - name: mtlscerts + mountPath: /certs/mtls + readOnly: true + {{- end }} ports: + {{- if not .Values.tls.force }} - name: http containerPort: 9000 protocol: TCP + {{- end }} + - name: https + containerPort: 9443 + protocol: TCP - name: tcp-edge containerPort: 8000 - protocol: TCP + protocol: TCP livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 45 + periodSeconds: 30 httpGet: path: / + {{- if .Values.tls.force }} + port: 9443 + scheme: HTTPS + {{- else }} + {{- if .Values.enterpriseEdition.enabled }} + {{- if regexMatch "^[0-9]+\\.[0-9]+\\.[0-9]+$" .Values.enterpriseEdition.image.tag }} + {{- if eq (semver .Values.enterpriseEdition.image.tag | (semver "2.7.0").Compare) -1 }} + port: 9443 + scheme: HTTPS + {{- else }} + port: 9000 + scheme: HTTP + {{- end }} + {{- else }} + {{- if eq .Values.enterpriseEdition.image.tag "latest" }} + port: 9443 + scheme: HTTPS + {{- else }} + port: 9000 + scheme: HTTP + {{- end }} + {{- end}} + {{- else }} + {{- if regexMatch "^[0-9]+\\.[0-9]+\\.[0-9]+$" .Values.image.tag }} + {{- if eq (semver .Values.image.tag | (semver "2.6.0").Compare) -1 }} + port: 9443 + scheme: HTTPS + {{- else }} port: 9000 + scheme: HTTP + {{- end}} + {{- else }} + {{- if eq .Values.image.tag "latest" }} + port: 9443 + scheme: HTTPS + {{- else }} + port: 9000 + scheme: HTTP + {{- end }} + {{- end }} + {{- end }} + {{- end }} readinessProbe: + failureThreshold: 5 + initialDelaySeconds: 45 + periodSeconds: 30 httpGet: path: / + {{- if .Values.tls.force }} + port: 9443 + scheme: HTTPS + {{- else }} + {{- if .Values.enterpriseEdition.enabled }} + {{- if regexMatch "^[0-9]+\\.[0-9]+\\.[0-9]+$" .Values.enterpriseEdition.image.tag }} + {{- if eq (semver .Values.enterpriseEdition.image.tag | (semver "2.7.0").Compare) -1 }} + port: 9443 + scheme: HTTPS + {{- else }} + port: 9000 + scheme: HTTP + {{- end }} + {{- else }} + {{- if eq 
.Values.enterpriseEdition.image.tag "latest" }} + port: 9443 + scheme: HTTPS + {{- else }} + port: 9000 + scheme: HTTP + {{- end }} + {{- end}} + {{- else }} + {{- if regexMatch "^[0-9]+\\.[0-9]+\\.[0-9]+$" .Values.image.tag }} + {{- if eq (semver .Values.image.tag | (semver "2.6.0").Compare) -1 }} + port: 9443 + scheme: HTTPS + {{- else }} + port: 9000 + scheme: HTTP + {{- end}} + {{- else }} + {{- if eq .Values.image.tag "latest" }} + port: 9443 + scheme: HTTPS + {{- else }} port: 9000 + scheme: HTTP + {{- end }} + {{- end }} + {{- end }} + {{- end }} resources: {{- toYaml .Values.resources | nindent 12 }} diff --git a/charts/portainer/templates/ingress.yaml b/charts/portainer/templates/ingress.yaml index 2f736b0..50d51d4 100644 --- a/charts/portainer/templates/ingress.yaml +++ b/charts/portainer/templates/ingress.yaml @@ -1,10 +1,8 @@ {{- if .Values.ingress.enabled -}} {{- $fullName := include "portainer.fullname" . -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} +{{- $tlsforced := .Values.tls.force -}} +{{- $apiVersion := include "ingress.apiVersion" . -}} +apiVersion: {{ $apiVersion }} kind: Ingress metadata: name: {{ $fullName }} @@ -16,6 +14,9 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} spec: +{{- with .Values.ingress.ingressClassName }} + ingressClassName: {{ . }} +{{- end }} {{- if .Values.ingress.tls }} tls: {{- range .Values.ingress.tls }} @@ -33,9 +34,27 @@ spec: paths: {{- range .paths }} - path: {{ .path | default "/" }} + {{- if eq $apiVersion "networking.k8s.io/v1" }} + pathType: Prefix + {{- end }} backend: + {{- if eq $apiVersion "networking.k8s.io/v1" }} + service: + name: {{ $fullName }} + port: + {{- if $tlsforced }} + number: {{ .port | default 9443 }} + {{- else }} + number: {{ .port | default 9000 }} + {{- end }} + {{- else }} serviceName: {{ $fullName }} + {{- if $tlsforced }} + servicePort: {{ .port | default 9443 }} + {{- else }} servicePort: {{ .port | default 9000 }} + {{- end }} + {{- end }} {{- end }} {{- end }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/portainer/templates/namespace.yaml b/charts/portainer/templates/namespace.yaml new file mode 100644 index 0000000..a2aec89 --- /dev/null +++ b/charts/portainer/templates/namespace.yaml @@ -0,0 +1,8 @@ +{{ if .Values.createNamespace }} +apiVersion: v1 +kind: Namespace +metadata: + name: portainer + labels: + pod-security.kubernetes.io/enforce: privileged +{{ end }} \ No newline at end of file diff --git a/charts/portainer/templates/pvc.yaml b/charts/portainer/templates/pvc.yaml index 172e185..60cf1cf 100644 --- a/charts/portainer/templates/pvc.yaml +++ b/charts/portainer/templates/pvc.yaml @@ -1,30 +1,30 @@ +{{- if .Values.persistence.enabled -}} {{- if not .Values.persistence.existingClaim -}} --- kind: "PersistentVolumeClaim" apiVersion: "v1" metadata: name: {{ template "portainer.fullname" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ .Release.Namespace }} annotations: - {{- if .Values.persistence.storageClass }} - volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.storageClass | quote }} - {{- else }} - volume.alpha.kubernetes.io/storage-class: "generic" - {{- end }} {{- if .Values.persistence.annotations }} {{ toYaml .Values.persistence.annotations | indent 2 }} {{ end }} labels: io.portainer.kubernetes.application.stack: portainer - {{- include "portainer.labels" . | nindent 4 }} + {{- include "portainer.labels" . 
| nindent 4 }} spec: accessModes: - {{ default "ReadWriteOnce" .Values.persistence.accessMode | quote }} resources: requests: storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + storageClassName: {{ .Values.persistence.storageClass | quote }} + {{ end }} {{- if .Values.persistence.selector }} selector: {{ toYaml .Values.persistence.selector | indent 4 }} {{ end }} {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/portainer/templates/rbac.yaml b/charts/portainer/templates/rbac.yaml index 079e080..f28ddca 100644 --- a/charts/portainer/templates/rbac.yaml +++ b/charts/portainer/templates/rbac.yaml @@ -1,3 +1,4 @@ +{{- if .Values.localMgmt }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -11,4 +12,5 @@ roleRef: subjects: - kind: ServiceAccount namespace: {{ .Release.Namespace }} - name: {{ include "portainer.serviceAccountName" . }} \ No newline at end of file + name: {{ include "portainer.serviceAccountName" . }} +{{- end }} \ No newline at end of file diff --git a/charts/portainer/templates/service.yaml b/charts/portainer/templates/service.yaml index c9292fe..b6f12e5 100644 --- a/charts/portainer/templates/service.yaml +++ b/charts/portainer/templates/service.yaml @@ -6,9 +6,16 @@ metadata: labels: io.portainer.kubernetes.application.stack: portainer {{- include "portainer.labels" . | nindent 4 }} + {{- if .Values.service.annotations }} + annotations: + {{- range $key, $value := .Values.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} spec: type: {{ .Values.service.type }} ports: + {{- if not .Values.tls.force }} - port: {{ .Values.service.httpPort }} targetPort: 9000 protocol: TCP @@ -16,12 +23,25 @@ spec: {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.httpNodePort))) }} nodePort: {{ .Values.service.httpNodePort}} {{- end }} + {{- end }} + - port: {{ .Values.service.httpsPort }} + targetPort: 9443 + protocol: TCP + name: https + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.httpsNodePort))) }} + nodePort: {{ .Values.service.httpsNodePort}} + {{- end }} + {{- if (eq .Values.service.type "NodePort") }} + - port: {{ .Values.service.edgeNodePort }} + targetPort: {{ .Values.service.edgeNodePort }} + {{- else }} - port: {{ .Values.service.edgePort }} - targetPort: 8000 + targetPort: {{ .Values.service.edgePort }} + {{- end }} protocol: TCP name: edge {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.edgeNodePort))) }} nodePort: {{ .Values.service.edgeNodePort }} - {{- end }} + {{- end }} selector: {{- include "portainer.selectorLabels" . | nindent 4 }} diff --git a/charts/portainer/templates/serviceaccount.yaml b/charts/portainer/templates/serviceaccount.yaml index 2b9630e..042996d 100644 --- a/charts/portainer/templates/serviceaccount.yaml +++ b/charts/portainer/templates/serviceaccount.yaml @@ -1,3 +1,4 @@ +{{- if .Values.localMgmt }} apiVersion: v1 kind: ServiceAccount metadata: @@ -9,3 +10,4 @@ metadata: annotations: {{- toYaml . 
| nindent 4 }} {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/portainer/values.yaml b/charts/portainer/values.yaml index 0fdcad6..dc24fb4 100644 --- a/charts/portainer/values.yaml +++ b/charts/portainer/values.yaml @@ -4,37 +4,79 @@ replicaCount: 1 +# If enterpriseEdition is enabled, then use the values below _instead_ of those in .image +enterpriseEdition: + enabled: false + image: + repository: portainer/portainer-ee + tag: 2.21.5 + pullPolicy: Always + image: repository: portainer/portainer-ce - tag: latest - pullPolicy: IfNotPresent + tag: 2.21.5 + pullPolicy: Always imagePullSecrets: [] +nodeSelector: {} +tolerations: [] + serviceAccount: annotations: {} name: portainer-sa-clusteradmin +# This flag provides the ability to enable or disable RBAC-related resources during the deployment of the Portainer application +# If you are using Portainer to manage the K8s cluster it is deployed to, this flag must be set to true +localMgmt: true + service: # Set the httpNodePort and edgeNodePort only if the type is NodePort # For Ingress, set the type to be ClusterIP and set ingress.enabled to true # For Cloud Providers, set the type to be LoadBalancer type: NodePort httpPort: 9000 + httpsPort: 9443 httpNodePort: 30777 + httpsNodePort: 30779 edgePort: 8000 edgeNodePort: 30776 + annotations: {} + +tls: + # If set, Portainer will be configured to use TLS only + force: false + # If set, will mount the existing secret into the pod + existingSecret: "" + +mtls: + # If set, Portainer will be configured to use mTLS only + enable: false + # If set, will mount the existing secret into the pod + existingSecret: "" + +feature: + flags: [] ingress: enabled: false + ingressClassName: "" annotations: {} + # kubernetes.io/ingress.class: nginx + # Only use below if tls.force=true + # nginx.ingress.kubernetes.io/backend-protocol: HTTPS + # Note: Hosts and paths are of type array hosts: - host: paths: [] + # - path: "/" tls: [] resources: {} persistence: - size: "1Gi" + enabled: true + size: "10Gi" annotations: {} + storageClass: + existingClaim: diff --git a/deploy/manifests/agent/README.md b/deploy/manifests/agent/README.md index bd430df..7960bd3 100644 --- a/deploy/manifests/agent/README.md +++ b/deploy/manifests/agent/README.md @@ -2,6 +2,8 @@ The manifests used to deploy the Portainer agent inside a Kubernetes cluster. +**NOTE**: Manifests to deploy the Portainer agent Enterprise Edition are available in the *ee* folder. + To deploy an Edge agent inside your Kubernetes cluster, it is recommended to follow the instructions available inside your Portainer instance. # Usage diff --git a/deploy/manifests/agent/ee/README.md b/deploy/manifests/agent/ee/README.md new file mode 100644 index 0000000..bdbd5bb --- /dev/null +++ b/deploy/manifests/agent/ee/README.md @@ -0,0 +1,17 @@ +# Agent (Enterprise Edition) + +The manifests used to deploy the Portainer agent inside a Kubernetes cluster for Enterprise Edition. + +To deploy an Edge agent inside your Kubernetes cluster, it is recommended to follow the instructions available inside your Portainer instance. + +# Usage + +## Deploy the Portainer agent and access it via an external load balancer + +If your cloud provider supports external load balancers, you can use the following command to deploy the regular Portainer agent (not Edge): + +``` +kubectl ... apply -f portainer-agent-k8s-lb.yaml +``` + +This will deploy the Portainer agent and create an external load balancer which you'll be able to use to connect to the agent on port 9001. 
diff --git a/deploy/manifests/agent/ee/agent-stack-ee20-windows.yml b/deploy/manifests/agent/ee/agent-stack-ee20-windows.yml new file mode 100644 index 0000000..3b54dbb --- /dev/null +++ b/deploy/manifests/agent/ee/agent-stack-ee20-windows.yml @@ -0,0 +1,25 @@ +version: '3.3' +services: + agent: + image: portainer/agent:2.0.0 + ports: + - target: 9001 + published: 9001 + protocol: tcp + volumes: + - type: npipe + source: \\.\pipe\docker_engine + target: \\.\pipe\docker_engine + - type: bind + source: C:\ProgramData\docker\volumes + target: C:\ProgramData\docker\volumes + networks: + - agent_network + deploy: + mode: global + placement: + constraints: [node.platform.os == windows] + +networks: + agent_network: + driver: overlay diff --git a/deploy/manifests/agent/ee/agent-stack-ee20.yml b/deploy/manifests/agent/ee/agent-stack-ee20.yml new file mode 100644 index 0000000..e44ad68 --- /dev/null +++ b/deploy/manifests/agent/ee/agent-stack-ee20.yml @@ -0,0 +1,24 @@ +version: '3.2' + +services: + agent: + image: portainer/agent:2.0.0 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/volumes:/var/lib/docker/volumes + ports: + - target: 9001 + published: 9001 + protocol: tcp + mode: host + networks: + - portainer_agent + deploy: + mode: global + placement: + constraints: [node.platform.os == linux] + +networks: + portainer_agent: + driver: overlay + attachable: true diff --git a/deploy/manifests/agent/ee/agent-stack-ee24-windows.yml b/deploy/manifests/agent/ee/agent-stack-ee24-windows.yml new file mode 100644 index 0000000..723376c --- /dev/null +++ b/deploy/manifests/agent/ee/agent-stack-ee24-windows.yml @@ -0,0 +1,25 @@ +version: '3.3' +services: + agent: + image: portainer/agent:2.4.0 + ports: + - target: 9001 + published: 9001 + protocol: tcp + volumes: + - type: npipe + source: \\.\pipe\docker_engine + target: \\.\pipe\docker_engine + - type: bind + source: C:\ProgramData\docker\volumes + target: C:\ProgramData\docker\volumes + networks: + - agent_network + deploy: + mode: global + placement: + constraints: [node.platform.os == windows] + +networks: + agent_network: + driver: overlay diff --git a/deploy/manifests/agent/ee/agent-stack-ee24.yml b/deploy/manifests/agent/ee/agent-stack-ee24.yml new file mode 100644 index 0000000..bfeaa42 --- /dev/null +++ b/deploy/manifests/agent/ee/agent-stack-ee24.yml @@ -0,0 +1,24 @@ +version: '3.2' + +services: + agent: + image: portainer/agent:2.4.0 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/volumes:/var/lib/docker/volumes + ports: + - target: 9001 + published: 9001 + protocol: tcp + mode: host + networks: + - portainer_agent + deploy: + mode: global + placement: + constraints: [node.platform.os == linux] + +networks: + portainer_agent: + driver: overlay + attachable: true diff --git a/deploy/manifests/agent/ee/agent-stack-windows.yml b/deploy/manifests/agent/ee/agent-stack-windows.yml new file mode 100644 index 0000000..fbfbe35 --- /dev/null +++ b/deploy/manifests/agent/ee/agent-stack-windows.yml @@ -0,0 +1,25 @@ +version: '3.3' +services: + agent: + image: portainer/agent:2.21.5 + ports: + - target: 9001 + published: 9001 + protocol: tcp + volumes: + - type: npipe + source: \\.\pipe\docker_engine + target: \\.\pipe\docker_engine + - type: bind + source: C:\ProgramData\docker\volumes + target: C:\ProgramData\docker\volumes + networks: + - agent_network + deploy: + mode: global + placement: + constraints: [node.platform.os == windows] + +networks: + agent_network: + driver: overlay diff --git 
a/deploy/manifests/agent/ee/agent-stack.yml b/deploy/manifests/agent/ee/agent-stack.yml new file mode 100644 index 0000000..7c96d24 --- /dev/null +++ b/deploy/manifests/agent/ee/agent-stack.yml @@ -0,0 +1,24 @@ +version: '3.2' + +services: + agent: + image: portainer/agent:2.21.5 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/volumes:/var/lib/docker/volumes + ports: + - target: 9001 + published: 9001 + protocol: tcp + mode: host + networks: + - portainer_agent + deploy: + mode: global + placement: + constraints: [node.platform.os == linux] + +networks: + portainer_agent: + driver: overlay + attachable: true diff --git a/deploy/manifests/agent/ee/portainer-agent-edge-k8s.yaml b/deploy/manifests/agent/ee/portainer-agent-edge-k8s.yaml new file mode 100644 index 0000000..dd1bf52 --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-edge-k8s.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +# Optional: can be added to expose the agent port 80 to associate an Edge key. +# --- +# apiVersion: v1 +# kind: Service +# metadata: +# name: portainer-agent +# namespace: portainer +# spec: +# type: LoadBalancer +# selector: +# app: portainer-agent +# ports: +# - name: http +# protocol: TCP +# port: 80 +# targetPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.21.5 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: EDGE + value: "1" + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent" + - name: EDGE_ID + valueFrom: + configMapKeyRef: + name: portainer-agent-edge-id + key: edge.id + - name: EDGE_KEY + valueFrom: + secretKeyRef: + name: portainer-agent-edge-key + key: edge.key + ports: + - containerPort: 9001 + protocol: TCP + - containerPort: 80 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-ee20-edge-k8s.yaml b/deploy/manifests/agent/ee/portainer-agent-ee20-edge-k8s.yaml new file mode 100644 index 0000000..c4ac4e2 --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-ee20-edge-k8s.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +# Optional: can be added to expose the agent port 80 to associate an Edge key. 
+# --- +# apiVersion: v1 +# kind: Service +# metadata: +# name: portainer-agent +# namespace: portainer +# spec: +# type: LoadBalancer +# selector: +# app: portainer-agent +# ports: +# - name: http +# protocol: TCP +# port: 80 +# targetPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.0.0 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: EDGE + value: "1" + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent" + - name: EDGE_ID + valueFrom: + configMapKeyRef: + name: portainer-agent-edge-id + key: edge.id + - name: EDGE_KEY + valueFrom: + secretKeyRef: + name: portainer-agent-edge-key + key: edge.key + ports: + - containerPort: 9001 + protocol: TCP + - containerPort: 80 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-ee20-k8s-lb.yaml b/deploy/manifests/agent/ee/portainer-agent-ee20-k8s-lb.yaml new file mode 100644 index 0000000..30d5834 --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-ee20-k8s-lb.yaml @@ -0,0 +1,80 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + type: LoadBalancer + selector: + app: portainer-agent + ports: + - name: http + protocol: TCP + port: 9001 + targetPort: 9001 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent-headless + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.0.0 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: DEBUG + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent-headless" + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - containerPort: 9001 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-ee20-k8s-nodeport.yaml b/deploy/manifests/agent/ee/portainer-agent-ee20-k8s-nodeport.yaml new file mode 100644 index 0000000..a8f190d --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-ee20-k8s-nodeport.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + 
name: portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + type: NodePort + selector: + app: portainer-agent + ports: + - name: http + protocol: TCP + port: 9001 + targetPort: 9001 + nodePort: 30778 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent-headless + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.0.0 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: DEBUG + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent-headless" + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - containerPort: 9001 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-ee210-edge-k8s.yaml b/deploy/manifests/agent/ee/portainer-agent-ee210-edge-k8s.yaml new file mode 100644 index 0000000..47f4b71 --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-ee210-edge-k8s.yaml @@ -0,0 +1,100 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +# Optional: can be added to expose the agent port 80 to associate an Edge key. 
+# --- +# apiVersion: v1 +# kind: Service +# metadata: +# name: portainer-agent +# namespace: portainer +# spec: +# type: LoadBalancer +# selector: +# app: portainer-agent +# ports: +# - name: http +# protocol: TCP +# port: 80 +# targetPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.10.0 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: EDGE + value: "1" + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent" + - name: EDGE_ID + valueFrom: + configMapKeyRef: + name: portainer-agent-edge + key: edge.id + - name: EDGE_INSECURE_POLL + valueFrom: + configMapKeyRef: + name: portainer-agent-edge + key: edge.insecure_poll + - name: EDGE_KEY + valueFrom: + secretKeyRef: + name: portainer-agent-edge-key + key: edge.key + ports: + - containerPort: 9001 + protocol: TCP + - containerPort: 80 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-ee210-k8s-lb.yaml b/deploy/manifests/agent/ee/portainer-agent-ee210-k8s-lb.yaml new file mode 100644 index 0000000..7f03e5f --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-ee210-k8s-lb.yaml @@ -0,0 +1,80 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + type: LoadBalancer + selector: + app: portainer-agent + ports: + - name: http + protocol: TCP + port: 9001 + targetPort: 9001 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent-headless + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.10.0 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent-headless" + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - containerPort: 9001 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-ee210-k8s-nodeport.yaml b/deploy/manifests/agent/ee/portainer-agent-ee210-k8s-nodeport.yaml new file mode 100644 index 0000000..cc446e5 --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-ee210-k8s-nodeport.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: 
portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + type: NodePort + selector: + app: portainer-agent + ports: + - name: http + protocol: TCP + port: 9001 + targetPort: 9001 + nodePort: 30778 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent-headless + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.4.0 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent-headless" + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - containerPort: 9001 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-ee24-edge-k8s.yaml b/deploy/manifests/agent/ee/portainer-agent-ee24-edge-k8s.yaml new file mode 100644 index 0000000..0d54b70 --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-ee24-edge-k8s.yaml @@ -0,0 +1,95 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +# Optional: can be added to expose the agent port 80 to associate an Edge key. 
+# --- +# apiVersion: v1 +# kind: Service +# metadata: +# name: portainer-agent +# namespace: portainer +# spec: +# type: LoadBalancer +# selector: +# app: portainer-agent +# ports: +# - name: http +# protocol: TCP +# port: 80 +# targetPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.4.0 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: EDGE + value: "1" + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent" + - name: EDGE_ID + valueFrom: + configMapKeyRef: + name: portainer-agent-edge-id + key: edge.id + - name: EDGE_KEY + valueFrom: + secretKeyRef: + name: portainer-agent-edge-key + key: edge.key + ports: + - containerPort: 9001 + protocol: TCP + - containerPort: 80 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-ee24-k8s-lb.yaml b/deploy/manifests/agent/ee/portainer-agent-ee24-k8s-lb.yaml new file mode 100644 index 0000000..1b71a72 --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-ee24-k8s-lb.yaml @@ -0,0 +1,80 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + type: LoadBalancer + selector: + app: portainer-agent + ports: + - name: http + protocol: TCP + port: 9001 + targetPort: 9001 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent-headless + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.4.0 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent-headless" + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - containerPort: 9001 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-ee24-k8s-nodeport.yaml b/deploy/manifests/agent/ee/portainer-agent-ee24-k8s-nodeport.yaml new file mode 100644 index 0000000..cc446e5 --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-ee24-k8s-nodeport.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: 
portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + type: NodePort + selector: + app: portainer-agent + ports: + - name: http + protocol: TCP + port: 9001 + targetPort: 9001 + nodePort: 30778 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent-headless + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.4.0 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent-headless" + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - containerPort: 9001 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-k8s-lb.yaml b/deploy/manifests/agent/ee/portainer-agent-k8s-lb.yaml new file mode 100644 index 0000000..d93ab99 --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-k8s-lb.yaml @@ -0,0 +1,80 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer-crb-clusteradmin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + type: LoadBalancer + selector: + app: portainer-agent + ports: + - name: http + protocol: TCP + port: 9001 + targetPort: 9001 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent-headless + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.21.5 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent-headless" + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - containerPort: 9001 + protocol: TCP diff --git a/deploy/manifests/agent/ee/portainer-agent-k8s-nodeport.yaml b/deploy/manifests/agent/ee/portainer-agent-k8s-nodeport.yaml new file mode 100644 index 0000000..bdf734a --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-agent-k8s-nodeport.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer-crb-clusteradmin +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: portainer-sa-clusteradmin + namespace: portainer +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent + namespace: portainer +spec: + type: NodePort + selector: + app: portainer-agent + ports: + - name: http + protocol: TCP + port: 9001 + targetPort: 9001 + nodePort: 30778 +--- +apiVersion: v1 +kind: Service +metadata: + name: portainer-agent-headless + namespace: portainer +spec: + clusterIP: None + selector: + app: portainer-agent +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer-agent + namespace: portainer +spec: + selector: + matchLabels: + app: portainer-agent + template: + metadata: + labels: + app: portainer-agent + spec: + serviceAccountName: portainer-sa-clusteradmin + containers: + - name: portainer-agent + image: portainer/agent:2.21.5 + imagePullPolicy: Always + env: + - name: LOG_LEVEL + value: INFO + - name: AGENT_CLUSTER_ADDR + value: "portainer-agent-headless" + - name: KUBERNETES_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - containerPort: 9001 + protocol: TCP diff --git a/deploy/manifests/agent/portainer-edge-agent-setup.sh b/deploy/manifests/agent/ee/portainer-edge-agent-setup.sh old mode 100755 new mode 100644 similarity index 96% rename from deploy/manifests/agent/portainer-edge-agent-setup.sh rename to deploy/manifests/agent/ee/portainer-edge-agent-setup.sh index 47cfde5..db86e01 --- a/deploy/manifests/agent/portainer-edge-agent-setup.sh +++ b/deploy/manifests/agent/ee/portainer-edge-agent-setup.sh @@ -55,7 +55,7 @@ main() { [[ "$(command -v kubectl)" ]] || errorAndExit "Unable to find kubectl binary. Please ensure kubectl is installed before running this script." info "Downloading agent manifest..." - curl -L https://portainer.github.io/k8s/deploy/manifests/agent/portainer-agent-edge-k8s.yaml -o portainer-agent-edge-k8s.yaml || errorAndExit "Unable to download agent manifest" + curl -L https://portainer.github.io/k8s/deploy/manifests/agent/ee/portainer-agent-edge-k8s.yaml -o portainer-agent-edge-k8s.yaml || errorAndExit "Unable to download agent manifest" info "Creating Portainer namespace..." kubectl create namespace portainer diff --git a/deploy/manifests/agent/ee/portainer-ee20-edge-agent-setup.sh b/deploy/manifests/agent/ee/portainer-ee20-edge-agent-setup.sh new file mode 100644 index 0000000..eba296c --- /dev/null +++ b/deploy/manifests/agent/ee/portainer-ee20-edge-agent-setup.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +# Script used to deploy the Portainer Edge agent inside a Kubernetes cluster. 
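+#
+# Example invocation (a sketch with placeholder values; the Edge ID and Edge key are
+# generated by the Portainer server when an Edge environment is added):
+#   ./portainer-ee20-edge-agent-setup.sh "<EDGE_ID>" "<EDGE_KEY>"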
+
+# Requires:
+# curl
+# kubectl
+
+### COLOR OUTPUT ###
+
+ESeq="\x1b["
+RCol="$ESeq"'0m' # Text Reset
+
+# Regular Bold Underline High Intensity BoldHigh Intens Background High Intensity Backgrounds
+Bla="$ESeq"'0;30m'; BBla="$ESeq"'1;30m'; UBla="$ESeq"'4;30m'; IBla="$ESeq"'0;90m'; BIBla="$ESeq"'1;90m'; On_Bla="$ESeq"'40m'; On_IBla="$ESeq"'0;100m';
+Red="$ESeq"'0;31m'; BRed="$ESeq"'1;31m'; URed="$ESeq"'4;31m'; IRed="$ESeq"'0;91m'; BIRed="$ESeq"'1;91m'; On_Red="$ESeq"'41m'; On_IRed="$ESeq"'0;101m';
+Gre="$ESeq"'0;32m'; BGre="$ESeq"'1;32m'; UGre="$ESeq"'4;32m'; IGre="$ESeq"'0;92m'; BIGre="$ESeq"'1;92m'; On_Gre="$ESeq"'42m'; On_IGre="$ESeq"'0;102m';
+Yel="$ESeq"'0;33m'; BYel="$ESeq"'1;33m'; UYel="$ESeq"'4;33m'; IYel="$ESeq"'0;93m'; BIYel="$ESeq"'1;93m'; On_Yel="$ESeq"'43m'; On_IYel="$ESeq"'0;103m';
+Blu="$ESeq"'0;34m'; BBlu="$ESeq"'1;34m'; UBlu="$ESeq"'4;34m'; IBlu="$ESeq"'0;94m'; BIBlu="$ESeq"'1;94m'; On_Blu="$ESeq"'44m'; On_IBlu="$ESeq"'0;104m';
+Pur="$ESeq"'0;35m'; BPur="$ESeq"'1;35m'; UPur="$ESeq"'4;35m'; IPur="$ESeq"'0;95m'; BIPur="$ESeq"'1;95m'; On_Pur="$ESeq"'45m'; On_IPur="$ESeq"'0;105m';
+Cya="$ESeq"'0;36m'; BCya="$ESeq"'1;36m'; UCya="$ESeq"'4;36m'; ICya="$ESeq"'0;96m'; BICya="$ESeq"'1;96m'; On_Cya="$ESeq"'46m'; On_ICya="$ESeq"'0;106m';
+Whi="$ESeq"'0;37m'; BWhi="$ESeq"'1;37m'; UWhi="$ESeq"'4;37m'; IWhi="$ESeq"'0;97m'; BIWhi="$ESeq"'1;97m'; On_Whi="$ESeq"'47m'; On_IWhi="$ESeq"'0;107m';
+
+printSection() {
+    echo -e "${BIYel}>>>> ${BIWhi}${1}${RCol}"
+}
+
+info() {
+    echo -e "${BIWhi}${1}${RCol}"
+}
+
+success() {
+    echo -e "${BIGre}${1}${RCol}"
+}
+
+error() {
+    echo -e "${BIRed}${1}${RCol}"
+}
+
+errorAndExit() {
+    echo -e "${BIRed}${1}${RCol}"
+    exit 1
+}
+
+### !COLOR OUTPUT ###
+
+main() {
+    if [[ $# -ne 2 ]]; then
+        error "Not enough arguments"
+        error "Usage: ${0} <EDGE_ID> <EDGE_KEY>"
+        exit 1
+    fi
+
+    [[ "$(command -v curl)" ]] || errorAndExit "Unable to find curl binary. Please ensure curl is installed before running this script."
+    [[ "$(command -v kubectl)" ]] || errorAndExit "Unable to find kubectl binary. Please ensure kubectl is installed before running this script."
+
+    info "Downloading agent manifest..."
+    curl -L https://portainer.github.io/k8s/deploy/manifests/agent/ee/portainer-agent-ee20-edge-k8s.yaml -o portainer-agent-edge-k8s.yaml || errorAndExit "Unable to download agent manifest"
+
+    info "Creating Portainer namespace..."
+    kubectl create namespace portainer
+
+    info "Creating agent configuration..."
+    kubectl create configmap portainer-agent-edge-id "--from-literal=edge.id=$1" -n portainer
+
+    info "Creating agent secret..."
+    kubectl create secret generic portainer-agent-edge-key "--from-literal=edge.key=$2" -n portainer
+
+    info "Deploying agent..."
+    kubectl apply -f portainer-agent-edge-k8s.yaml || errorAndExit "Unable to deploy agent manifest"
+
+    success "Portainer Edge agent successfully deployed"
+    exit 0
+}
+
+main "$@"
diff --git a/deploy/manifests/agent/ee/portainer-ee210-edge-agent-setup.sh b/deploy/manifests/agent/ee/portainer-ee210-edge-agent-setup.sh
new file mode 100644
index 0000000..66fbb6e
--- /dev/null
+++ b/deploy/manifests/agent/ee/portainer-ee210-edge-agent-setup.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+# Script used to deploy the Portainer Edge agent inside a Kubernetes cluster.
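+#
+# Example invocation (a sketch with placeholder values; the third argument sets
+# edge.insecure_poll in the agent configuration and can be omitted):
+#   ./portainer-ee210-edge-agent-setup.sh "<EDGE_ID>" "<EDGE_KEY>" "1"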
+
+# Requires:
+# curl
+# kubectl
+
+### COLOR OUTPUT ###
+
+ESeq="\x1b["
+RCol="$ESeq"'0m' # Text Reset
+
+# Regular Bold Underline High Intensity BoldHigh Intens Background High Intensity Backgrounds
+Bla="$ESeq"'0;30m'; BBla="$ESeq"'1;30m'; UBla="$ESeq"'4;30m'; IBla="$ESeq"'0;90m'; BIBla="$ESeq"'1;90m'; On_Bla="$ESeq"'40m'; On_IBla="$ESeq"'0;100m';
+Red="$ESeq"'0;31m'; BRed="$ESeq"'1;31m'; URed="$ESeq"'4;31m'; IRed="$ESeq"'0;91m'; BIRed="$ESeq"'1;91m'; On_Red="$ESeq"'41m'; On_IRed="$ESeq"'0;101m';
+Gre="$ESeq"'0;32m'; BGre="$ESeq"'1;32m'; UGre="$ESeq"'4;32m'; IGre="$ESeq"'0;92m'; BIGre="$ESeq"'1;92m'; On_Gre="$ESeq"'42m'; On_IGre="$ESeq"'0;102m';
+Yel="$ESeq"'0;33m'; BYel="$ESeq"'1;33m'; UYel="$ESeq"'4;33m'; IYel="$ESeq"'0;93m'; BIYel="$ESeq"'1;93m'; On_Yel="$ESeq"'43m'; On_IYel="$ESeq"'0;103m';
+Blu="$ESeq"'0;34m'; BBlu="$ESeq"'1;34m'; UBlu="$ESeq"'4;34m'; IBlu="$ESeq"'0;94m'; BIBlu="$ESeq"'1;94m'; On_Blu="$ESeq"'44m'; On_IBlu="$ESeq"'0;104m';
+Pur="$ESeq"'0;35m'; BPur="$ESeq"'1;35m'; UPur="$ESeq"'4;35m'; IPur="$ESeq"'0;95m'; BIPur="$ESeq"'1;95m'; On_Pur="$ESeq"'45m'; On_IPur="$ESeq"'0;105m';
+Cya="$ESeq"'0;36m'; BCya="$ESeq"'1;36m'; UCya="$ESeq"'4;36m'; ICya="$ESeq"'0;96m'; BICya="$ESeq"'1;96m'; On_Cya="$ESeq"'46m'; On_ICya="$ESeq"'0;106m';
+Whi="$ESeq"'0;37m'; BWhi="$ESeq"'1;37m'; UWhi="$ESeq"'4;37m'; IWhi="$ESeq"'0;97m'; BIWhi="$ESeq"'1;97m'; On_Whi="$ESeq"'47m'; On_IWhi="$ESeq"'0;107m';
+
+printSection() {
+    echo -e "${BIYel}>>>> ${BIWhi}${1}${RCol}"
+}
+
+info() {
+    echo -e "${BIWhi}${1}${RCol}"
+}
+
+success() {
+    echo -e "${BIGre}${1}${RCol}"
+}
+
+error() {
+    echo -e "${BIRed}${1}${RCol}"
+}
+
+errorAndExit() {
+    echo -e "${BIRed}${1}${RCol}"
+    exit 1
+}
+
+### !COLOR OUTPUT ###
+
+main() {
+    if [[ $# -lt 2 ]]; then
+        error "Not enough arguments"
+        error "Usage: ${0} <EDGE_ID> <EDGE_KEY> [EDGE_INSECURE_POLL]"
+        exit 1
+    fi
+
+    local EDGE_ID="$1"
+    local EDGE_KEY="$2"
+    local EDGE_INSECURE_POLL="$3"
+
+    [[ "$(command -v curl)" ]] || errorAndExit "Unable to find curl binary. Please ensure curl is installed before running this script."
+    [[ "$(command -v kubectl)" ]] || errorAndExit "Unable to find kubectl binary. Please ensure kubectl is installed before running this script."
+
+    info "Downloading agent manifest..."
+    curl -L https://portainer.github.io/k8s/deploy/manifests/agent/ee/portainer-agent-ee210-edge-k8s.yaml -o portainer-agent-edge-k8s.yaml || errorAndExit "Unable to download agent manifest"
+
+    info "Creating Portainer namespace..."
+    kubectl create namespace portainer
+
+    info "Creating agent configuration..."
+    kubectl create configmap portainer-agent-edge --from-literal="edge.id=$EDGE_ID" --from-literal="edge.insecure_poll=$EDGE_INSECURE_POLL" -n portainer
+
+    info "Creating agent secret..."
+    kubectl create secret generic portainer-agent-edge-key "--from-literal=edge.key=$EDGE_KEY" -n portainer
+
+    info "Deploying agent..."
+    kubectl apply -f portainer-agent-edge-k8s.yaml || errorAndExit "Unable to deploy agent manifest"
+
+    success "Portainer Edge agent successfully deployed"
+    exit 0
+}
+
+main "$@"
diff --git a/deploy/manifests/agent/ee/portainer-ee24-edge-agent-setup.sh b/deploy/manifests/agent/ee/portainer-ee24-edge-agent-setup.sh
new file mode 100644
index 0000000..b6d0a09
--- /dev/null
+++ b/deploy/manifests/agent/ee/portainer-ee24-edge-agent-setup.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+# Script used to deploy the Portainer Edge agent inside a Kubernetes cluster.
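+#
+# Example invocation (a sketch with placeholder values; supply the Edge ID and Edge key
+# generated by the Portainer server):
+#   ./portainer-ee24-edge-agent-setup.sh "<EDGE_ID>" "<EDGE_KEY>"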
+
+# Requires:
+# curl
+# kubectl
+
+### COLOR OUTPUT ###
+
+ESeq="\x1b["
+RCol="$ESeq"'0m' # Text Reset
+
+# Regular Bold Underline High Intensity BoldHigh Intens Background High Intensity Backgrounds
+Bla="$ESeq"'0;30m'; BBla="$ESeq"'1;30m'; UBla="$ESeq"'4;30m'; IBla="$ESeq"'0;90m'; BIBla="$ESeq"'1;90m'; On_Bla="$ESeq"'40m'; On_IBla="$ESeq"'0;100m';
+Red="$ESeq"'0;31m'; BRed="$ESeq"'1;31m'; URed="$ESeq"'4;31m'; IRed="$ESeq"'0;91m'; BIRed="$ESeq"'1;91m'; On_Red="$ESeq"'41m'; On_IRed="$ESeq"'0;101m';
+Gre="$ESeq"'0;32m'; BGre="$ESeq"'1;32m'; UGre="$ESeq"'4;32m'; IGre="$ESeq"'0;92m'; BIGre="$ESeq"'1;92m'; On_Gre="$ESeq"'42m'; On_IGre="$ESeq"'0;102m';
+Yel="$ESeq"'0;33m'; BYel="$ESeq"'1;33m'; UYel="$ESeq"'4;33m'; IYel="$ESeq"'0;93m'; BIYel="$ESeq"'1;93m'; On_Yel="$ESeq"'43m'; On_IYel="$ESeq"'0;103m';
+Blu="$ESeq"'0;34m'; BBlu="$ESeq"'1;34m'; UBlu="$ESeq"'4;34m'; IBlu="$ESeq"'0;94m'; BIBlu="$ESeq"'1;94m'; On_Blu="$ESeq"'44m'; On_IBlu="$ESeq"'0;104m';
+Pur="$ESeq"'0;35m'; BPur="$ESeq"'1;35m'; UPur="$ESeq"'4;35m'; IPur="$ESeq"'0;95m'; BIPur="$ESeq"'1;95m'; On_Pur="$ESeq"'45m'; On_IPur="$ESeq"'0;105m';
+Cya="$ESeq"'0;36m'; BCya="$ESeq"'1;36m'; UCya="$ESeq"'4;36m'; ICya="$ESeq"'0;96m'; BICya="$ESeq"'1;96m'; On_Cya="$ESeq"'46m'; On_ICya="$ESeq"'0;106m';
+Whi="$ESeq"'0;37m'; BWhi="$ESeq"'1;37m'; UWhi="$ESeq"'4;37m'; IWhi="$ESeq"'0;97m'; BIWhi="$ESeq"'1;97m'; On_Whi="$ESeq"'47m'; On_IWhi="$ESeq"'0;107m';
+
+printSection() {
+    echo -e "${BIYel}>>>> ${BIWhi}${1}${RCol}"
+}
+
+info() {
+    echo -e "${BIWhi}${1}${RCol}"
+}
+
+success() {
+    echo -e "${BIGre}${1}${RCol}"
+}
+
+error() {
+    echo -e "${BIRed}${1}${RCol}"
+}
+
+errorAndExit() {
+    echo -e "${BIRed}${1}${RCol}"
+    exit 1
+}
+
+### !COLOR OUTPUT ###
+
+main() {
+    if [[ $# -ne 2 ]]; then
+        error "Not enough arguments"
+        error "Usage: ${0} <EDGE_ID> <EDGE_KEY>"
+        exit 1
+    fi
+
+    [[ "$(command -v curl)" ]] || errorAndExit "Unable to find curl binary. Please ensure curl is installed before running this script."
+    [[ "$(command -v kubectl)" ]] || errorAndExit "Unable to find kubectl binary. Please ensure kubectl is installed before running this script."
+
+    info "Downloading agent manifest..."
+    curl -L https://portainer.github.io/k8s/deploy/manifests/agent/ee/portainer-agent-ee24-edge-k8s.yaml -o portainer-agent-edge-k8s.yaml || errorAndExit "Unable to download agent manifest"
+
+    info "Creating Portainer namespace..."
+    kubectl create namespace portainer
+
+    info "Creating agent configuration..."
+    kubectl create configmap portainer-agent-edge-id "--from-literal=edge.id=$1" -n portainer
+
+    info "Creating agent secret..."
+    kubectl create secret generic portainer-agent-edge-key "--from-literal=edge.key=$2" -n portainer
+
+    info "Deploying agent..."
+ kubectl apply -f portainer-agent-edge-k8s.yaml || errorAndExit "Unable to deploy agent manifest" + + success "Portainer Edge agent successfully deployed" + exit 0 +} + +main "$@" diff --git a/deploy/manifests/agent/portainer-agent-edge-k8s.yaml b/deploy/manifests/agent/portainer-agent-edge-k8s.yaml index ec6d04e..dd1bf52 100644 --- a/deploy/manifests/agent/portainer-agent-edge-k8s.yaml +++ b/deploy/manifests/agent/portainer-agent-edge-k8s.yaml @@ -9,7 +9,7 @@ metadata: name: portainer-sa-clusteradmin namespace: portainer --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: portainer-crb-clusteradmin @@ -65,11 +65,11 @@ spec: serviceAccountName: portainer-sa-clusteradmin containers: - name: portainer-agent - image: portainer/agent:latest + image: portainer/agent:2.21.5 imagePullPolicy: Always env: - name: LOG_LEVEL - value: DEBUG + value: INFO - name: KUBERNETES_POD_IP valueFrom: fieldRef: diff --git a/deploy/manifests/agent/portainer-agent-k8s-lb.yaml b/deploy/manifests/agent/portainer-agent-k8s-lb.yaml index 8795dda..d93ab99 100644 --- a/deploy/manifests/agent/portainer-agent-k8s-lb.yaml +++ b/deploy/manifests/agent/portainer-agent-k8s-lb.yaml @@ -9,7 +9,7 @@ metadata: name: portainer-sa-clusteradmin namespace: portainer --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: portainer-crb-clusteradmin @@ -64,11 +64,11 @@ spec: serviceAccountName: portainer-sa-clusteradmin containers: - name: portainer-agent - image: portainer/agent:latest + image: portainer/agent:2.21.5 imagePullPolicy: Always env: - name: LOG_LEVEL - value: DEBUG + value: INFO - name: AGENT_CLUSTER_ADDR value: "portainer-agent-headless" - name: KUBERNETES_POD_IP diff --git a/deploy/manifests/agent/portainer-agent-k8s-nodeport.yaml b/deploy/manifests/agent/portainer-agent-k8s-nodeport.yaml index f6f8539..bdf734a 100644 --- a/deploy/manifests/agent/portainer-agent-k8s-nodeport.yaml +++ b/deploy/manifests/agent/portainer-agent-k8s-nodeport.yaml @@ -9,7 +9,7 @@ metadata: name: portainer-sa-clusteradmin namespace: portainer --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: portainer-crb-clusteradmin @@ -65,11 +65,11 @@ spec: serviceAccountName: portainer-sa-clusteradmin containers: - name: portainer-agent - image: portainer/agent:latest + image: portainer/agent:2.21.5 imagePullPolicy: Always env: - name: LOG_LEVEL - value: DEBUG + value: INFO - name: AGENT_CLUSTER_ADDR value: "portainer-agent-headless" - name: KUBERNETES_POD_IP diff --git a/deploy/manifests/portainer/portainer-ee.yaml b/deploy/manifests/portainer/portainer-ee.yaml new file mode 100644 index 0000000..201e047 --- /dev/null +++ b/deploy/manifests/portainer/portainer-ee.yaml @@ -0,0 +1,152 @@ +--- +# Source: portainer/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +# Source: portainer/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer + labels: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +--- +# Source: portainer/templates/pvc.yaml +kind: "PersistentVolumeClaim" +apiVersion: "v1" +metadata: + name: portainer + namespace: portainer + annotations: + volume.alpha.kubernetes.io/storage-class: "generic" + labels: + 
io.portainer.kubernetes.application.stack: portainer + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" +--- +# Source: portainer/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer + labels: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + namespace: portainer + name: portainer-sa-clusteradmin +--- +# Source: portainer/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: portainer + namespace: portainer + labels: + io.portainer.kubernetes.application.stack: portainer + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +spec: + type: NodePort + ports: + - port: 9000 + targetPort: 9000 + protocol: TCP + name: http + nodePort: 30777 + - port: 9443 + targetPort: 9443 + protocol: TCP + name: https + nodePort: 30779 + - port: 30776 + targetPort: 30776 + protocol: TCP + name: edge + nodePort: 30776 + selector: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer +--- +# Source: portainer/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer + namespace: portainer + labels: + io.portainer.kubernetes.application.stack: portainer + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +spec: + replicas: 1 + strategy: + type: "Recreate" + selector: + matchLabels: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + template: + metadata: + labels: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + spec: + nodeSelector: + {} + serviceAccountName: portainer-sa-clusteradmin + volumes: + - name: "data" + persistentVolumeClaim: + claimName: portainer + containers: + - name: portainer + image: "portainer/portainer-ee:2.21.5" + imagePullPolicy: Always + args: + - '--tunnel-port=30776' + volumeMounts: + - name: data + mountPath: /data + ports: + - name: http + containerPort: 9000 + protocol: TCP + - name: https + containerPort: 9443 + protocol: TCP + - name: tcp-edge + containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: 9443 + scheme: HTTPS + readinessProbe: + httpGet: + path: / + port: 9443 + scheme: HTTPS + resources: + {} diff --git a/deploy/manifests/portainer/portainer-lb-ee.yaml b/deploy/manifests/portainer/portainer-lb-ee.yaml new file mode 100644 index 0000000..f91bdd7 --- /dev/null +++ b/deploy/manifests/portainer/portainer-lb-ee.yaml @@ -0,0 +1,148 @@ +--- +# Source: portainer/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +# Source: portainer/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer + labels: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +--- +# Source: portainer/templates/pvc.yaml +kind: "PersistentVolumeClaim" +apiVersion: "v1" +metadata: + name: portainer + namespace: portainer + annotations: + volume.alpha.kubernetes.io/storage-class: "generic" + 
labels: + io.portainer.kubernetes.application.stack: portainer + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "10Gi" +--- +# Source: portainer/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: portainer + labels: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + namespace: portainer + name: portainer-sa-clusteradmin +--- +# Source: portainer/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: portainer + namespace: portainer + labels: + io.portainer.kubernetes.application.stack: portainer + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +spec: + type: LoadBalancer + ports: + - port: 9000 + targetPort: 9000 + protocol: TCP + name: http + - port: 9443 + targetPort: 9443 + protocol: TCP + name: https + - port: 8000 + targetPort: 8000 + protocol: TCP + name: edge + selector: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer +--- +# Source: portainer/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: portainer + namespace: portainer + labels: + io.portainer.kubernetes.application.stack: portainer + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +spec: + replicas: 1 + strategy: + type: "Recreate" + selector: + matchLabels: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + template: + metadata: + labels: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + spec: + nodeSelector: + {} + serviceAccountName: portainer-sa-clusteradmin + volumes: + - name: "data" + persistentVolumeClaim: + claimName: portainer + containers: + - name: portainer + image: "portainer/portainer-ee:2.21.5" + imagePullPolicy: Always + args: + volumeMounts: + - name: data + mountPath: /data + ports: + - name: http + containerPort: 9000 + protocol: TCP + - name: https + containerPort: 9443 + protocol: TCP + - name: tcp-edge + containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: 9443 + scheme: HTTPS + readinessProbe: + httpGet: + path: / + port: 9443 + scheme: HTTPS + resources: + {} diff --git a/deploy/manifests/portainer/portainer-lb.yaml b/deploy/manifests/portainer/portainer-lb.yaml index ff7d5c6..15cf8f4 100644 --- a/deploy/manifests/portainer/portainer-lb.yaml +++ b/deploy/manifests/portainer/portainer-lb.yaml @@ -1,34 +1,40 @@ --- +# Source: portainer/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +# Source: portainer/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer + labels: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +--- # Source: portainer/templates/pvc.yaml kind: "PersistentVolumeClaim" apiVersion: "v1" metadata: name: portainer - namespace: portainer + namespace: portainer annotations: volume.alpha.kubernetes.io/storage-class: "generic" labels: 
io.portainer.kubernetes.application.stack: portainer app.kubernetes.io/name: portainer app.kubernetes.io/instance: portainer - app.kubernetes.io/version: "2.0.0" + app.kubernetes.io/version: "ce-latest-ee-2.21.5" spec: accessModes: - "ReadWriteOnce" resources: requests: - storage: "1Gi" ---- -# Source: portainer/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: portainer-sa-clusteradmin - namespace: portainer - labels: - app.kubernetes.io/name: portainer - app.kubernetes.io/instance: portainer - app.kubernetes.io/version: "2.0.0" + storage: "10Gi" --- # Source: portainer/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -38,15 +44,15 @@ metadata: labels: app.kubernetes.io/name: portainer app.kubernetes.io/instance: portainer - app.kubernetes.io/version: "2.0.0" + app.kubernetes.io/version: "ce-latest-ee-2.21.5" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: -- kind: ServiceAccount - namespace: portainer - name: portainer-sa-clusteradmin + - kind: ServiceAccount + namespace: portainer + name: portainer-sa-clusteradmin --- # Source: portainer/templates/service.yaml apiVersion: v1 @@ -58,7 +64,7 @@ metadata: io.portainer.kubernetes.application.stack: portainer app.kubernetes.io/name: portainer app.kubernetes.io/instance: portainer - app.kubernetes.io/version: "2.0.0" + app.kubernetes.io/version: "ce-latest-ee-2.21.5" spec: type: LoadBalancer ports: @@ -66,6 +72,10 @@ spec: targetPort: 9000 protocol: TCP name: http + - port: 9443 + targetPort: 9443 + protocol: TCP + name: https - port: 8000 targetPort: 8000 protocol: TCP @@ -84,7 +94,7 @@ metadata: io.portainer.kubernetes.application.stack: portainer app.kubernetes.io/name: portainer app.kubernetes.io/instance: portainer - app.kubernetes.io/version: "2.0.0" + app.kubernetes.io/version: "ce-latest-ee-2.21.5" spec: replicas: 1 strategy: @@ -99,15 +109,18 @@ spec: app.kubernetes.io/name: portainer app.kubernetes.io/instance: portainer spec: + nodeSelector: + {} serviceAccountName: portainer-sa-clusteradmin volumes: - - name: "data" - persistentVolumeClaim: - claimName: portainer + - name: "data" + persistentVolumeClaim: + claimName: portainer containers: - name: portainer - image: "portainer/portainer-ce:latest" - imagePullPolicy: IfNotPresent + image: "portainer/portainer-ce:2.21.5" + imagePullPolicy: Always + args: volumeMounts: - name: data mountPath: /data @@ -115,17 +128,21 @@ spec: - name: http containerPort: 9000 protocol: TCP + - name: https + containerPort: 9443 + protocol: TCP - name: tcp-edge containerPort: 8000 - protocol: TCP + protocol: TCP livenessProbe: httpGet: path: / - port: 9000 + port: 9443 + scheme: HTTPS readinessProbe: httpGet: path: / - port: 9000 + port: 9443 + scheme: HTTPS resources: {} - diff --git a/deploy/manifests/portainer/portainer.yaml b/deploy/manifests/portainer/portainer.yaml index 7868352..89de758 100644 --- a/deploy/manifests/portainer/portainer.yaml +++ b/deploy/manifests/portainer/portainer.yaml @@ -1,34 +1,40 @@ --- +# Source: portainer/templates/namespace.yaml +apiVersion: v1 +kind: Namespace +metadata: + name: portainer +--- +# Source: portainer/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: portainer-sa-clusteradmin + namespace: portainer + labels: + app.kubernetes.io/name: portainer + app.kubernetes.io/instance: portainer + app.kubernetes.io/version: "ce-latest-ee-2.21.5" +--- # Source: portainer/templates/pvc.yaml kind: "PersistentVolumeClaim" apiVersion: "v1" 
metadata: name: portainer - namespace: portainer + namespace: portainer annotations: volume.alpha.kubernetes.io/storage-class: "generic" labels: io.portainer.kubernetes.application.stack: portainer app.kubernetes.io/name: portainer app.kubernetes.io/instance: portainer - app.kubernetes.io/version: "2.0.0" + app.kubernetes.io/version: "ce-latest-ee-2.21.5" spec: accessModes: - "ReadWriteOnce" resources: requests: - storage: "1Gi" ---- -# Source: portainer/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: portainer-sa-clusteradmin - namespace: portainer - labels: - app.kubernetes.io/name: portainer - app.kubernetes.io/instance: portainer - app.kubernetes.io/version: "2.0.0" + storage: "10Gi" --- # Source: portainer/templates/rbac.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -38,15 +44,15 @@ metadata: labels: app.kubernetes.io/name: portainer app.kubernetes.io/instance: portainer - app.kubernetes.io/version: "2.0.0" + app.kubernetes.io/version: "ce-latest-ee-2.21.5" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: -- kind: ServiceAccount - namespace: portainer - name: portainer-sa-clusteradmin + - kind: ServiceAccount + namespace: portainer + name: portainer-sa-clusteradmin --- # Source: portainer/templates/service.yaml apiVersion: v1 @@ -58,7 +64,7 @@ metadata: io.portainer.kubernetes.application.stack: portainer app.kubernetes.io/name: portainer app.kubernetes.io/instance: portainer - app.kubernetes.io/version: "2.0.0" + app.kubernetes.io/version: "ce-latest-ee-2.21.5" spec: type: NodePort ports: @@ -66,7 +72,15 @@ spec: targetPort: 9000 protocol: TCP name: http - targetPort: 8000 + nodePort: 30777 + - port: 9443 + targetPort: 9443 + protocol: TCP + name: https + nodePort: 30779 + - port: 30776 + targetPort: 30776 + protocol: TCP name: edge nodePort: 30776 selector: @@ -83,7 +97,7 @@ metadata: io.portainer.kubernetes.application.stack: portainer app.kubernetes.io/name: portainer app.kubernetes.io/instance: portainer - app.kubernetes.io/version: "2.0.0" + app.kubernetes.io/version: "ce-latest-ee-2.21.5" spec: replicas: 1 strategy: @@ -98,16 +112,19 @@ spec: app.kubernetes.io/name: portainer app.kubernetes.io/instance: portainer spec: + nodeSelector: + {} serviceAccountName: portainer-sa-clusteradmin volumes: - - name: "data" - persistentVolumeClaim: - claimName: portainer + - name: "data" + persistentVolumeClaim: + claimName: portainer containers: - name: portainer - image: "portainer/portainer-ce:latest" - imagePullPolicy: IfNotPresent - args: [ '--tunnel-port','30776' ] + image: "portainer/portainer-ce:2.21.5" + imagePullPolicy: Always + args: + - '--tunnel-port=30776' volumeMounts: - name: data mountPath: /data @@ -115,17 +132,21 @@ spec: - name: http containerPort: 9000 protocol: TCP + - name: https + containerPort: 9443 + protocol: TCP - name: tcp-edge containerPort: 8000 - protocol: TCP + protocol: TCP livenessProbe: httpGet: path: / - port: 9000 + port: 9443 + scheme: HTTPS readinessProbe: httpGet: path: / - port: 9000 + port: 9443 + scheme: HTTPS resources: {} -