From 42b55afb7052625ac2486ce56e75a073b69ec40e Mon Sep 17 00:00:00 2001 From: Nicolas Takashi Date: Mon, 20 Nov 2023 16:55:30 +0000 Subject: [PATCH] [CHORE] adding eBPF helm chart (#855) * [CHORE] adding eBPF helm chart Signed-off-by: Nicolas Takashi * Update .github/workflows/ebpf-test.yaml Co-authored-by: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> * Update charts/opentelemetry-ebpf/Chart.yaml Co-authored-by: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> * Update CONTRIBUTING.md Co-authored-by: Jared Tan * Update CONTRIBUTING.md Co-authored-by: Jared Tan * Update CONTRIBUTING.md Co-authored-by: Jared Tan * Update README.md Co-authored-by: Jared Tan * Update README.md Co-authored-by: Jared Tan * Update README.md Co-authored-by: Jared Tan * Update charts/opentelemetry-ebpf/CONTRIBUTING.md * Update charts/opentelemetry-ebpf/values.yaml * Update charts/opentelemetry-ebpf/values.yaml Co-authored-by: Dmitrii Anoshin --------- Signed-off-by: Nicolas Takashi Co-authored-by: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Co-authored-by: Jared Tan Co-authored-by: Dmitrii Anoshin --- .github/workflows/ebpf-test.yaml | 37 +++ Makefile | 2 +- charts/opentelemetry-ebpf/.helmignore | 23 ++ charts/opentelemetry-ebpf/CONTRIBUTING.md | 12 + charts/opentelemetry-ebpf/Chart.yaml | 14 + charts/opentelemetry-ebpf/README.md | 30 ++ .../rendered/cloud-collector-deployment.yaml | 44 +++ .../cloud-collector-serviceaccount.yaml | 12 + .../cloud-collector/rendered/configmap.yaml | 16 + .../rendered/k8s-collector-clusterrole.yaml | 29 ++ .../k8s-collector-clusterrolebinding.yaml | 20 ++ .../rendered/k8s-collector-deployment.yaml | 75 +++++ .../k8s-collector-serviceaccount.yaml | 12 + .../kernel-collector-clusterrole.yaml | 21 ++ .../kernel-collector-clusterrolebinding.yaml | 20 ++ .../rendered/kernel-collector-daemonset.yaml | 89 ++++++ .../kernel-collector-serviceaccount.yaml | 12 + .../rendered/reducer-deployment.yaml | 53 ++++ .../rendered/reducer-service.yaml | 29 ++ .../examples/cloud-collector/values.yaml | 5 + .../opentelemetry-ebpf/templates/_helpers.tpl | 168 +++++++++++ .../templates/cloud-collector-deployment.yaml | 119 ++++++++ .../cloud-collector-serviceaccount.yaml | 12 + .../templates/configmap.yaml | 10 + .../templates/k8s-collector-clusterrole.yaml | 25 ++ .../k8s-collector-clusterrolebinding.yaml | 16 + .../templates/k8s-collector-deployment.yaml | 169 +++++++++++ .../k8s-collector-serviceaccount.yaml | 8 + .../kernel-collector-clusterrole.yaml | 17 ++ .../kernel-collector-clusterrolebinding.yaml | 16 + .../templates/kernel-collector-daemonset.yaml | 200 +++++++++++++ .../kernel-collector-serviceaccount.yaml | 8 + .../templates/reducer-deployment.yaml | 125 ++++++++ .../templates/reducer-service.yaml | 16 + charts/opentelemetry-ebpf/values.schema.json | 163 +++++++++++ charts/opentelemetry-ebpf/values.yaml | 275 ++++++++++++++++++ 36 files changed, 1901 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/ebpf-test.yaml create mode 100644 charts/opentelemetry-ebpf/.helmignore create mode 100644 charts/opentelemetry-ebpf/CONTRIBUTING.md create mode 100644 charts/opentelemetry-ebpf/Chart.yaml create mode 100644 charts/opentelemetry-ebpf/README.md create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/cloud-collector-deployment.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/cloud-collector-serviceaccount.yaml create mode 100644 
charts/opentelemetry-ebpf/examples/cloud-collector/rendered/configmap.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-clusterrole.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-clusterrolebinding.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-deployment.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-serviceaccount.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-clusterrole.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-clusterrolebinding.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-daemonset.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-serviceaccount.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/reducer-deployment.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/rendered/reducer-service.yaml create mode 100644 charts/opentelemetry-ebpf/examples/cloud-collector/values.yaml create mode 100644 charts/opentelemetry-ebpf/templates/_helpers.tpl create mode 100644 charts/opentelemetry-ebpf/templates/cloud-collector-deployment.yaml create mode 100644 charts/opentelemetry-ebpf/templates/cloud-collector-serviceaccount.yaml create mode 100644 charts/opentelemetry-ebpf/templates/configmap.yaml create mode 100644 charts/opentelemetry-ebpf/templates/k8s-collector-clusterrole.yaml create mode 100644 charts/opentelemetry-ebpf/templates/k8s-collector-clusterrolebinding.yaml create mode 100644 charts/opentelemetry-ebpf/templates/k8s-collector-deployment.yaml create mode 100644 charts/opentelemetry-ebpf/templates/k8s-collector-serviceaccount.yaml create mode 100644 charts/opentelemetry-ebpf/templates/kernel-collector-clusterrole.yaml create mode 100644 charts/opentelemetry-ebpf/templates/kernel-collector-clusterrolebinding.yaml create mode 100644 charts/opentelemetry-ebpf/templates/kernel-collector-daemonset.yaml create mode 100644 charts/opentelemetry-ebpf/templates/kernel-collector-serviceaccount.yaml create mode 100644 charts/opentelemetry-ebpf/templates/reducer-deployment.yaml create mode 100644 charts/opentelemetry-ebpf/templates/reducer-service.yaml create mode 100644 charts/opentelemetry-ebpf/values.schema.json create mode 100644 charts/opentelemetry-ebpf/values.yaml diff --git a/.github/workflows/ebpf-test.yaml b/.github/workflows/ebpf-test.yaml new file mode 100644 index 000000000..c73551226 --- /dev/null +++ b/.github/workflows/ebpf-test.yaml @@ -0,0 +1,37 @@ +name: Test eBPF Chart + +on: + pull_request: + paths: + - "charts/opentelemetry-ebpf/**" + branches: + - main + +jobs: + ebpf-test: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Setup + uses: ./.github/actions/setup + with: + create-kind-cluster: "true" + + - name: Run chart-testing (install) + run: ct install --charts charts/opentelemetry-ebpf + + - name: Run daemonset and deployment install test + run: | + kubectl apply -f ./charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered + + kubectl rollout status deployment example-opentelemetry-collector --timeout=30s + + kubectl apply -f ./charts/opentelemetry-ebpf/examples/cloud-collector/rendered + + kubectl rollout status 
daemonset example-opentelemetry-ebpf-kernel-collector --timeout=30s + kubectl rollout status deployment example-opentelemetry-ebpf-cloud-collector --timeout=30s + kubectl rollout status deployment example-opentelemetry-ebpf-k8s-collector --timeout=30s + kubectl rollout status deployment example-opentelemetry-ebpf-reducer --timeout=30s diff --git a/Makefile b/Makefile index 133285f75..bc0f1eb18 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ TMP_DIRECTORY = ./tmp -CHARTS ?= opentelemetry-collector opentelemetry-operator opentelemetry-demo +CHARTS ?= opentelemetry-collector opentelemetry-operator opentelemetry-demo opentelemetry-ebpf .PHONY: generate-examples generate-examples: diff --git a/charts/opentelemetry-ebpf/.helmignore b/charts/opentelemetry-ebpf/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/opentelemetry-ebpf/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/opentelemetry-ebpf/CONTRIBUTING.md b/charts/opentelemetry-ebpf/CONTRIBUTING.md new file mode 100644 index 000000000..a8ed2bed2 --- /dev/null +++ b/charts/opentelemetry-ebpf/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# OpenTelemetry eBPF Chart Contributing Guide + +All changes to the chart require a bump to the version in `Chart.yaml`. See the [Contributing Guide](https://github.com/open-telemetry/opentelemetry-helm-charts/blob/main/CONTRIBUTING.md#versioning) for our versioning requirements. + +Once the chart version is bumped, the examples must be regenerated. You can regenerate examples by running `make generate-examples CHARTS=opentelemetry-ebpf`. + +## Bumping Default Collector Version + +1. Increase the minor version of the chart by one and set the patch version to zero. +2. Update the chart's `appVersion` to match the new collector version. This version will be used as the image tag by default. +3. Review the corresponding release notes in [OpenTelemetry eBPF](https://github.com/open-telemetry/opentelemetry-ebpf/releases). If any changes affect the Helm chart, adjust it accordingly. +4. Run `make generate-examples CHARTS=opentelemetry-ebpf`. diff --git a/charts/opentelemetry-ebpf/Chart.yaml b/charts/opentelemetry-ebpf/Chart.yaml new file mode 100644 index 000000000..220a31e25 --- /dev/null +++ b/charts/opentelemetry-ebpf/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v2 +name: opentelemetry-ebpf +version: 0.1.0 +description: OpenTelemetry eBPF Helm chart for Kubernetes +type: application +home: https://opentelemetry.io/ +sources: + - https://github.com/open-telemetry/opentelemetry-ebpf +maintainers: + - name: dmitryax + - name: TylerHelmuth + - name: nicolastakashi +icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png +appVersion: v0.10.0 diff --git a/charts/opentelemetry-ebpf/README.md b/charts/opentelemetry-ebpf/README.md new file mode 100644 index 000000000..4bc55b1a9 --- /dev/null +++ b/charts/opentelemetry-ebpf/README.md @@ -0,0 +1,30 @@ +# OpenTelemetry Collector eBPF Helm Chart + +This Helm chart installs [OpenTelemetry eBPF](https://github.com/open-telemetry/opentelemetry-ebpf) +in a Kubernetes cluster.
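+
+The reducer exports its metrics over OTLP gRPC to the collector endpoint configured through the `endpoint` value. As a minimal sketch of a custom values file (it mirrors `examples/cloud-collector/values.yaml`; the Service name `example-opentelemetry-collector` is only an illustration and should point at a collector reachable in your cluster):
+
+```yaml
+# Where the reducer sends OTLP gRPC metrics (an existing OpenTelemetry Collector Service).
+endpoint:
+  address: example-opentelemetry-collector
+
+# Optionally enable the cloud collector.
+cloudCollector:
+  enabled: true
+```
+
+Such a file could then be passed to `helm install my-opentelemetry-ebpf open-telemetry/opentelemetry-ebpf -f values.yaml`.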
+ +## Prerequisites + +- Kubernetes 1.24+ +- Helm 3.9+ + +## Installing the Chart + +Add OpenTelemetry Helm repository: + +```console +helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts +``` + +To install the chart with the release name my-opentelemetry-ebpf, run the following command: + +```console +helm install my-opentelemetry-ebpf open-telemetry/opentelemetry-ebpf +``` + +### Other configuration options + +The [values.yaml](./values.yaml) file contains information about all other configuration +options for this chart. + +For more examples see [Examples](examples). \ No newline at end of file diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/cloud-collector-deployment.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/cloud-collector-deployment.yaml new file mode 100644 index 000000000..43942d5af --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/cloud-collector-deployment.yaml @@ -0,0 +1,44 @@ +--- +# Source: opentelemetry-ebpf/templates/cloud-collector-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-ebpf-cloud-collector + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app.kubernetes.io/name: example-opentelemetry-ebpf-cloud-collector + app.kubernetes.io/instance: example + strategy: + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: example-opentelemetry-ebpf-cloud-collector + app.kubernetes.io/instance: example + spec: + containers: + - name: cloud-collector + image: "otel/opentelemetry-ebpf-cloud-collector:v0.10.0" + imagePullPolicy: IfNotPresent + args: + - --warning + env: + - name: "EBPF_NET_CLUSTER_NAME" + value: "" + - name: "EBPF_NET_INTAKE_HOST" + value: example-opentelemetry-ebpf-reducer + - name: "EBPF_NET_INTAKE_PORT" + value: "7000" + terminationGracePeriodSeconds: 30 + securityContext: {} + serviceAccountName: example-opentelemetry-ebpf-cloud-collector diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/cloud-collector-serviceaccount.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/cloud-collector-serviceaccount.yaml new file mode 100644 index 000000000..de573352f --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/cloud-collector-serviceaccount.yaml @@ -0,0 +1,12 @@ +--- +# Source: opentelemetry-ebpf/templates/cloud-collector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-ebpf-cloud-collector + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/configmap.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/configmap.yaml new file mode 100644 index 000000000..2c79cc9a5 --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/configmap.yaml @@ -0,0 +1,16 @@ +--- +# Source: opentelemetry-ebpf/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-ebpf-config + labels: + helm.sh/chart: 
opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm +data: + config.yaml: | + labels: + environment: "" diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-clusterrole.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-clusterrole.yaml new file mode 100644 index 000000000..dd202d0e3 --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-clusterrole.yaml @@ -0,0 +1,29 @@ +--- +# Source: opentelemetry-ebpf/templates/k8s-collector-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: example-opentelemetry-ebpf-k8s-collector + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - watch diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-clusterrolebinding.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-clusterrolebinding.yaml new file mode 100644 index 000000000..151df6b06 --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +--- +# Source: opentelemetry-ebpf/templates/k8s-collector-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: example-opentelemetry-ebpf-k8s-collector + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: example-opentelemetry-ebpf-k8s-collector +subjects: +- kind: ServiceAccount + name: example-opentelemetry-ebpf-k8s-collector + namespace: default diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-deployment.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-deployment.yaml new file mode 100644 index 000000000..b2bb6d772 --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-deployment.yaml @@ -0,0 +1,75 @@ +--- +# Source: opentelemetry-ebpf/templates/k8s-collector-deployment.yaml +# The k8s-collector consists of two services: +# 1) k8s-watcher: talks to the Kubernetes API server to determine the current state of +# the cluster; sets up watches to be notified of subsequent changes to pods, services +# and other resources. +# 2) k8s-relay: relays the information collected by k8s-watcher to the reducer. 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-ebpf-k8s-collector + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app.kubernetes.io/name: example-opentelemetry-ebpf-k8s-collector + app.kubernetes.io/instance: example + strategy: + type: RollingUpdate + template: + metadata: + annotations: + # This is here to allow us to do "zero-downtime" updates without an image change. + rollingUpdateVersion: "1" + charts.flowmill.com/version: 0.1.0 + labels: + app.kubernetes.io/name: example-opentelemetry-ebpf-k8s-collector + app.kubernetes.io/instance: example + spec: + containers: + - name: k8s-watcher + image: "otel/opentelemetry-ebpf-k8s-watcher:v0.10.0" + imagePullPolicy: IfNotPresent + args: + - --log-console + - --log-level=warning + # k8s-relay, which is a service that the k8s-watcher talks to. + # Currently not configurable, has to be reachable on localhost:8172, so must + # share a pod with the k8s-watcher above. + - name: k8s-relay + image: "otel/opentelemetry-ebpf-k8s-relay:v0.10.0" + imagePullPolicy: IfNotPresent + args: + - --config-file=/etc/network-explorer/config.yaml + - --warning + env: + - name: "EBPF_NET_CLUSTER_NAME" + value: "" + - name: "EBPF_NET_INTAKE_HOST" + value: example-opentelemetry-ebpf-reducer + - name: "EBPF_NET_INTAKE_PORT" + value: "7000" + volumeMounts: + - mountPath: /etc/network-explorer + name: k8s-relay-config + terminationGracePeriodSeconds: 30 + volumes: + - name: k8s-relay-config + projected: + sources: + - configMap: + name: example-opentelemetry-ebpf-config + items: + - key: config.yaml + path: config.yaml + securityContext: {} + serviceAccountName: example-opentelemetry-ebpf-k8s-collector diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-serviceaccount.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-serviceaccount.yaml new file mode 100644 index 000000000..55d527c41 --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/k8s-collector-serviceaccount.yaml @@ -0,0 +1,12 @@ +--- +# Source: opentelemetry-ebpf/templates/k8s-collector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-ebpf-k8s-collector + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-clusterrole.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-clusterrole.yaml new file mode 100644 index 000000000..edb0fff5f --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-clusterrole.yaml @@ -0,0 +1,21 @@ +--- +# Source: opentelemetry-ebpf/templates/kernel-collector-clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: example-opentelemetry-ebpf-kernel-collector + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - policy + 
resourceNames: + - example-opentelemetry-ebpf-kernel-collector + resources: + - podsecuritypolicies + verbs: + - use diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-clusterrolebinding.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-clusterrolebinding.yaml new file mode 100644 index 000000000..1014ba8b9 --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-clusterrolebinding.yaml @@ -0,0 +1,20 @@ +--- +# Source: opentelemetry-ebpf/templates/kernel-collector-clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: example-opentelemetry-ebpf-kernel-collector + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: example-opentelemetry-ebpf-kernel-collector +subjects: +- kind: ServiceAccount + name: example-opentelemetry-ebpf-kernel-collector + namespace: default diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-daemonset.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-daemonset.yaml new file mode 100644 index 000000000..2027790bb --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-daemonset.yaml @@ -0,0 +1,89 @@ +--- +# Source: opentelemetry-ebpf/templates/kernel-collector-daemonset.yaml +# kernel collector daemonset: deploys the kernel collector to each node in the cluster. +# The kernel collector needs to be able to compile and install +# eBPF programs in the node's kernel, so needs to run as root and +# needs to mount /lib/modules and /usr/src from the node itself. 
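+# In this chart the whole host filesystem is mounted read-only at /hostfs (the host-root volume below, exposed via EBPF_NET_HOST_DIR), which includes /lib/modules and /usr/src; /var/cache is additionally mounted writable for kernel header fetching.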
+apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: example-opentelemetry-ebpf-kernel-collector + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: example-opentelemetry-ebpf-kernel-collector + app.kubernetes.io/instance: example + template: + metadata: + annotations: + charts.flowmill.com/version: 0.1.0 + labels: + app.kubernetes.io/name: example-opentelemetry-ebpf-kernel-collector + app.kubernetes.io/instance: example + spec: + containers: + - name: kernel-collector + image: "otel/opentelemetry-ebpf-kernel-collector:v0.10.0" + imagePullPolicy: IfNotPresent + args: + - --config-file=/etc/network-explorer/config.yaml + - --disable-nomad-metadata + - --warning + # TODO: liveness probe + env: + - name: "EBPF_NET_CLUSTER_NAME" + value: "" + - name: "EBPF_NET_DISABLE_HTTP_METRICS" + value: "false" + - name: "EBPF_NET_KERNEL_HEADERS_AUTO_FETCH" + value: "true" + - name: "EBPF_NET_INTAKE_HOST" + value: example-opentelemetry-ebpf-reducer + - name: "EBPF_NET_INTAKE_PORT" + value: "7000" + - name: "EBPF_NET_HOST_DIR" + value: "/hostfs" + securityContext: + privileged: true + volumeMounts: + - mountPath: /hostfs/ + name: host-root + readOnly: true + - mountPath: /hostfs/var/cache + name: host-var-cache + readOnly: false + - mountPath: /etc/network-explorer + name: example-opentelemetry-ebpf-config + readOnly: true + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + hostPID: true + serviceAccountName: example-opentelemetry-ebpf-kernel-collector + volumes: + - name: example-opentelemetry-ebpf-config + projected: + sources: + - configMap: + name: example-opentelemetry-ebpf-config + items: + - key: config.yaml + path: config.yaml + - name: host-root + hostPath: + path: / + type: Directory + - name: host-var-cache + hostPath: + path: /var/cache + type: DirectoryOrCreate + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-serviceaccount.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-serviceaccount.yaml new file mode 100644 index 000000000..dbfd73dc1 --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/kernel-collector-serviceaccount.yaml @@ -0,0 +1,12 @@ +--- +# Source: opentelemetry-ebpf/templates/kernel-collector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-ebpf-kernel-collector + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/reducer-deployment.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/reducer-deployment.yaml new file mode 100644 index 000000000..6274ebcca --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/reducer-deployment.yaml @@ -0,0 +1,53 @@ +--- +# Source: opentelemetry-ebpf/templates/reducer-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-ebpf-reducer + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + 
app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + strategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: example-opentelemetry-ebpf-reducer + app.kubernetes.io/instance: example + template: + metadata: + labels: + app.kubernetes.io/name: example-opentelemetry-ebpf-reducer + app.kubernetes.io/instance: example + spec: + containers: + - name: reducer + image: "otel/opentelemetry-ebpf-reducer:v0.10.0" + imagePullPolicy: IfNotPresent + args: + - --port=7000 + - --log-console + - --no-log-file + - --warning + - --enable-aws-enrichment + - --disable-prometheus-metrics + - --enable-otlp-grpc-metrics + - --otlp-grpc-metrics-host=example-opentelemetry-collector + - --otlp-grpc-metrics-port=4317 + - --num-ingest-shards=1 + - --num-matching-shards=1 + - --num-aggregation-shards=1 + ports: + - name: telemetry + containerPort: 7000 + protocol: TCP + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 5 + exec: + command: ['/srv/health_check.sh', 'readiness_probe', 'localhost', "7000"] diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/reducer-service.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/reducer-service.yaml new file mode 100644 index 000000000..e75e0012d --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/rendered/reducer-service.yaml @@ -0,0 +1,29 @@ +--- +# Source: opentelemetry-ebpf/templates/reducer-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-ebpf-reducer + labels: + helm.sh/chart: opentelemetry-ebpf-0.1.0 + app.kubernetes.io/name: opentelemetry-ebpf + app.kubernetes.io/instance: example + app.kubernetes.io/version: "v0.10.0" + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + selector: + app.kubernetes.io/name: example-opentelemetry-ebpf-reducer + app.kubernetes.io/instance: example + ports: + + - name: stats + port: 7001 + targetPort: 7001 + protocol: TCP + appProtocol: http + - name: telemetry + port: 7000 + targetPort: 7000 + protocol: TCP + appProtocol: http diff --git a/charts/opentelemetry-ebpf/examples/cloud-collector/values.yaml b/charts/opentelemetry-ebpf/examples/cloud-collector/values.yaml new file mode 100644 index 000000000..a16615a17 --- /dev/null +++ b/charts/opentelemetry-ebpf/examples/cloud-collector/values.yaml @@ -0,0 +1,5 @@ +cloudCollector: + enabled: true + +endpoint: + address: example-opentelemetry-collector diff --git a/charts/opentelemetry-ebpf/templates/_helpers.tpl b/charts/opentelemetry-ebpf/templates/_helpers.tpl new file mode 100644 index 000000000..3ab3ecb9d --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/_helpers.tpl @@ -0,0 +1,168 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "opentelemetry-ebpf.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{- define "opentelemetry-ebpf.lowercase_chartname" -}} +{{- default .Chart.Name | lower }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "opentelemetry-ebpf.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "opentelemetry-ebpf.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "opentelemetry-ebpf.labels" -}} +helm.sh/chart: {{ include "opentelemetry-ebpf.chart" . }} +{{ include "opentelemetry-ebpf.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "opentelemetry-ebpf.selectorLabels" -}} +app.kubernetes.io/name: {{ include "opentelemetry-ebpf.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Fully qualified app name for the cloud-collector deployment. +*/}} +{{- define "opentelemetry-collector-cloud-collector.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-cloud-collector" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-cloud-collector" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the cloud-collector +*/}} +{{- define "opentelemetry-collector-cloud-collector.serviceAccountName" -}} +{{- if .Values.cloudCollector.serviceAccount.create }} +{{- default (include "opentelemetry-collector-cloud-collector.fullname" .) .Values.cloudCollector.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.cloudCollector.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Fully qualified app name for the k8s-collector deployment. +*/}} +{{- define "opentelemetry-collector-k8s-collector.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-k8s-collector" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-k8s-collector" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the k8s-collector +*/}} +{{- define "opentelemetry-collector-k8s-collector.serviceAccountName" -}} +{{- if .Values.k8sCollector.serviceAccount.create }} +{{- default (include "opentelemetry-collector-k8s-collector.fullname" .) .Values.k8sCollector.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.k8sCollector.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Fully qualified app name for the kernel-collector daemonset. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "opentelemetry-collector-kernel-collector.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-kernel-collector" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-kernel-collector" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the kernel-collector +*/}} +{{- define "opentelemetry-collector-kernel-collector.serviceAccountName" -}} +{{- if .Values.kernelCollector.serviceAccount.create }} +{{- default (include "opentelemetry-collector-kernel-collector.fullname" .) .Values.kernelCollector.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.kernelCollector.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Fully qualified app name for the reducer deployment. +*/}} +{{- define "opentelemetry-collector-reducer.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-reducer" .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-reducer" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* Build the list of port for service */}} +{{- define "opentelemetry-collector-reducer.servicePortsConfig" -}} +{{- $ports := deepCopy .Values.reducer.service.ports }} +{{- range $key, $port := $ports }} +{{- if $port.enabled }} +- name: {{ $key }} + port: {{ $port.servicePort }} + targetPort: {{ $port.containerPort }} + protocol: {{ $port.protocol }} + {{- if $port.appProtocol }} + appProtocol: {{ $port.appProtocol }} + {{- end }} +{{- if $port.nodePort }} + nodePort: {{ $port.nodePort }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for podDisruptionBudget. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" .Capabilities.KubeVersion.Version) -}} + {{- print "policy/v1" -}} + {{- else -}} + {{- print "policy/v1beta1" -}} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/opentelemetry-ebpf/templates/cloud-collector-deployment.yaml b/charts/opentelemetry-ebpf/templates/cloud-collector-deployment.yaml new file mode 100644 index 000000000..513dea499 --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/cloud-collector-deployment.yaml @@ -0,0 +1,119 @@ +{{- if .Values.cloudCollector.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "opentelemetry-collector-cloud-collector.fullname" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app.kubernetes.io/name: {{ include "opentelemetry-collector-cloud-collector.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + strategy: + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "opentelemetry-collector-cloud-collector.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end }} + containers: + - name: cloud-collector + {{- if .Values.cloudCollector.image.tag }} + image: "{{ default .Values.image.registry .Values.cloudCollector.image.registry }}/{{ .Values.cloudCollector.image.name }}:{{ .Values.cloudCollector.image.tag }}" + {{- else }} + image: "{{ default .Values.image.registry .Values.cloudCollector.image.registry }}/{{ .Values.cloudCollector.image.name }}:{{ default .Chart.AppVersion .Values.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} +{{- if hasKey .Values.cloudCollector "resources" }} + resources: +{{ toYaml .Values.cloudCollector.resources | indent 10 }} +{{- end }} + args: +{{- if .Values.log.console }} + - --log-console +{{- end }} +{{- if hasKey .Values.log "level" }} + - --{{ .Values.log.level }} +{{- end }} +{{- if hasKey .Values.cloudCollector "log" -}} +{{- if hasKey .Values.cloudCollector.log "whitelist" }} +{{- if hasKey .Values.cloudCollector.log.whitelist "channel" }} + - --log-whitelist-channel={{ .Values.cloudCollector.log.whitelist.channel }} +{{- end }} +{{- if hasKey .Values.cloudCollector.log.whitelist "component" }} + - --log-whitelist-component={{ .Values.cloudCollector.log.whitelist.component }} +{{- end }} +{{- if hasKey .Values.cloudCollector.log.whitelist "cloudPlatform" }} + - --log-whitelist-cloud-platform={{ .Values.cloudCollector.log.whitelist.cloudPlatform }} +{{- end }} +{{- end }} +{{- end }} +{{- if .Values.cloudCollector.additionalArgs }} +{{ toYaml .Values.cloudCollector.additionalArgs | indent 10 }} +{{- end }} + env: + - name: "EBPF_NET_CLUSTER_NAME" + value: {{ tpl (.Values.clusterName | toYaml) . }} +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.enabled) }} + - name: "EBPF_NET_DEBUG_MODE" + value: "true" +{{- end }} +{{- end }} +{{- if (default false .Values.cloudCollector.runUnderGDB) }} + - name: "EBPF_NET_RUN_UNDER_GDB" + value: "gdb" +{{- end }} + - name: "EBPF_NET_INTAKE_HOST" + value: {{ include "opentelemetry-collector-reducer.fullname" . }} + - name: "EBPF_NET_INTAKE_PORT" + value: "{{ default 7000 .Values.reducer.service.ports.telemetry.port }}" +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.storeMinidump) }} + - name: "EBPF_NET_MINIDUMP_DIR" + value: "/hostfs/var/run/network-explorer/minidump" +{{- end }} +{{- end }} +{{- if .Values.cloudCollector.env }} +{{ toYaml .Values.cloudCollector.env | indent 10 }} +{{- end }} +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.storeMinidump) }} + volumeMounts: + - mountPath: /hostfs/var/run/network-explorer + name: host-var-run-network-explorer + readOnly: false + volumes: + - name: host-var-run-network-explorer + hostPath: + path: /var/run/network-explorer + type: DirectoryOrCreate +{{- end }} +{{- end }} + terminationGracePeriodSeconds: 30 + securityContext: {} + serviceAccountName: {{ include "opentelemetry-collector-cloud-collector.serviceAccountName" . }} + {{- with .Values.cloudCollector.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8}} + {{- end }} + {{- with .Values.cloudCollector.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.cloudCollector.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} +{{- end }} diff --git a/charts/opentelemetry-ebpf/templates/cloud-collector-serviceaccount.yaml b/charts/opentelemetry-ebpf/templates/cloud-collector-serviceaccount.yaml new file mode 100644 index 000000000..a5e1156cb --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/cloud-collector-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.cloudCollector.enabled .Values.cloudCollector.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "opentelemetry-collector-cloud-collector.serviceAccountName" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} + {{- with .Values.cloudCollector.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/charts/opentelemetry-ebpf/templates/configmap.yaml b/charts/opentelemetry-ebpf/templates/configmap.yaml new file mode 100644 index 000000000..42c4ab32c --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/configmap.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "opentelemetry-ebpf.fullname" . }}-config + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +data: + config.yaml: | + labels: + environment: {{ tpl (.Values.clusterName | toYaml) . }} diff --git a/charts/opentelemetry-ebpf/templates/k8s-collector-clusterrole.yaml b/charts/opentelemetry-ebpf/templates/k8s-collector-clusterrole.yaml new file mode 100644 index 000000000..1a4641174 --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/k8s-collector-clusterrole.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "opentelemetry-collector-k8s-collector.fullname" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - apps + resources: + - replicasets + verbs: + - get + - list + - watch +{{- end -}} diff --git a/charts/opentelemetry-ebpf/templates/k8s-collector-clusterrolebinding.yaml b/charts/opentelemetry-ebpf/templates/k8s-collector-clusterrolebinding.yaml new file mode 100644 index 000000000..7f4fbca5a --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/k8s-collector-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "opentelemetry-collector-k8s-collector.fullname" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "opentelemetry-collector-k8s-collector.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ include "opentelemetry-collector-k8s-collector.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/opentelemetry-ebpf/templates/k8s-collector-deployment.yaml b/charts/opentelemetry-ebpf/templates/k8s-collector-deployment.yaml new file mode 100644 index 000000000..9234b5d2a --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/k8s-collector-deployment.yaml @@ -0,0 +1,169 @@ +{{- if .Values.k8sCollector.enabled }} +# The k8s-collector consists of two services: +# 1) k8s-watcher: talks to the Kubernetes API server to determine the current state of +# the cluster; sets up watches to be notified of subsequent changes to pods, services +# and other resources. 
+# 2) k8s-relay: relays the information collected by k8s-watcher to the reducer. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "opentelemetry-collector-k8s-collector.fullname" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app.kubernetes.io/name: {{ include "opentelemetry-collector-k8s-collector.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + strategy: + type: RollingUpdate + template: + metadata: + annotations: + # This is here to allow us to do "zero-downtime" updates without an image change. + rollingUpdateVersion: "1" + charts.flowmill.com/version: {{ .Chart.Version }} + labels: + app.kubernetes.io/name: {{ include "opentelemetry-collector-k8s-collector.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} + containers: + - name: k8s-watcher + {{- if .Values.k8sCollector.watcher.image.tag }} + image: "{{ default .Values.image.registry .Values.k8sCollector.watcher.image.repository }}/{{ .Values.k8sCollector.watcher.image.name }}:{{ .Values.k8sCollector.watcher.image.tag }}" + {{- else }} + image: "{{ default .Values.image.registry .Values.k8sCollector.watcher.image.repository }}/{{ .Values.k8sCollector.watcher.image.name }}:{{ default .Chart.AppVersion .Values.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} +{{- if hasKey .Values.k8sCollector.watcher "resources" }} + resources: +{{ toYaml .Values.k8sCollector.watcher.resources | indent 10 }} +{{- end }} + args: + - --log-console + {{- if hasKey .Values.log "level" }} + - --log-level={{ .Values.log.level }} + {{- end }} + # k8s-relay, which is a service that the k8s-watcher talks to. + # Currently not configurable, has to be reachable on localhost:8172, so must + # share a pod with the k8s-watcher above. 
+ - name: k8s-relay + {{- if .Values.k8sCollector.relay.image.tag }} + image: "{{ default .Values.image.registry .Values.k8sCollector.relay.image.registry }}/{{ .Values.k8sCollector.relay.image.name }}:{{ .Values.k8sCollector.relay.image.tag }}" + {{- else }} + image: "{{ default .Values.image.registry .Values.k8sCollector.relay.image.registry }}/{{ .Values.k8sCollector.relay.image.name }}:{{ default .Chart.AppVersion .Values.k8sCollector.relay.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + +{{- if hasKey .Values.k8sCollector.relay "resources" }} + resources: +{{ toYaml .Values.k8sCollector.relay.resources | indent 10 }} +{{- end }} + args: + - --config-file=/etc/network-explorer/config.yaml +{{- if .Values.log.console }} + - --log-console +{{- end }} +{{- if hasKey .Values.log "level" }} + - --{{ .Values.log.level }} +{{- end }} +{{- if hasKey .Values.k8sCollector.relay "log" -}} +{{- if hasKey .Values.k8sCollector.relay.log "whitelist" }} +{{- if hasKey .Values.k8sCollector.relay.log.whitelist "channel" }} + - --log-whitelist-channel={{ .Values.k8sCollector.relay.log.whitelist.channel }} +{{- end }} +{{- if hasKey .Values.k8sCollector.relay.log.whitelist "component" }} + - --log-whitelist-component={{ .Values.k8sCollector.relay.log.whitelist.component }} +{{- end }} +{{- if hasKey .Values.k8sCollector.relay.log.whitelist "cloudPlatform" }} + - --log-whitelist-cloud-platform={{ .Values.k8sCollector.relay.log.whitelist.cloudPlatform }} +{{- end }} +{{- end }} +{{- end }} +{{- if .Values.k8sCollector.relay.additionalArgs }} +{{ toYaml .Values.k8sCollector.relay.additionalArgs | indent 10 }} +{{- end }} + env: + - name: "EBPF_NET_CLUSTER_NAME" + value: {{ tpl (.Values.clusterName | toYaml) . }} +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.enabled) }} + - name: "EBPF_NET_DEBUG_MODE" + value: "true" +{{- end }} +{{- end }} +{{- if (default false .Values.k8sCollector.relay.runUnderGDB) }} + - name: "EBPF_NET_RUN_UNDER_GDB" + value: "gdb" +{{- end }} + - name: "EBPF_NET_INTAKE_HOST" + value: {{ include "opentelemetry-collector-reducer.fullname" . }} + - name: "EBPF_NET_INTAKE_PORT" + value: "{{ default 7000 .Values.reducer.service.ports.telemetry.port }}" +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.storeMinidump) }} + - name: "EBPF_NET_MINIDUMP_DIR" + value: "/hostfs/var/run/network-explorer/minidump" +{{- end }} +{{- end }} +{{- if and .Values.debug.enabled .Values.debug.sendUnplannedExitMetric }} + - name: "EBPF_NET_CRASH_METRIC_HOST" + value: {{ .Values.endpoint.address }} + - name: "EBPF_NET_CRASH_METRIC_PORT" + value: {{ .Values.endpoint.port }} +{{- end }} +{{- if .Values.k8sCollector.relay.env }} +{{ toYaml .Values.k8sCollector.relay.env | indent 10 }} +{{- end }} + volumeMounts: + - mountPath: /etc/network-explorer + name: k8s-relay-config +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.storeMinidump) }} + - mountPath: /hostfs/var/run/network-explorer + name: host-var-run-network-explorer + readOnly: false +{{- end }} +{{- end }} + terminationGracePeriodSeconds: 30 + volumes: +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.storeMinidump) }} + - name: host-var-run-network-explorer + hostPath: + path: /var/run/network-explorer + type: DirectoryOrCreate +{{- end }} +{{- end }} + - name: k8s-relay-config + projected: + sources: + - configMap: + name: {{ include "opentelemetry-ebpf.fullname" . 
}}-config + items: + - key: config.yaml + path: config.yaml + securityContext: {} + serviceAccountName: {{ include "opentelemetry-collector-k8s-collector.serviceAccountName" . }} + {{- with .Values.k8sCollector.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8}} + {{- end }} + {{- with .Values.k8sCollector.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.k8sCollector.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} +{{- end }} diff --git a/charts/opentelemetry-ebpf/templates/k8s-collector-serviceaccount.yaml b/charts/opentelemetry-ebpf/templates/k8s-collector-serviceaccount.yaml new file mode 100644 index 000000000..db5bd148e --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/k8s-collector-serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if and .Values.k8sCollector.enabled .Values.k8sCollector.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "opentelemetry-collector-k8s-collector.serviceAccountName" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +{{- end -}} diff --git a/charts/opentelemetry-ebpf/templates/kernel-collector-clusterrole.yaml b/charts/opentelemetry-ebpf/templates/kernel-collector-clusterrole.yaml new file mode 100644 index 000000000..d0675a991 --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/kernel-collector-clusterrole.yaml @@ -0,0 +1,17 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "opentelemetry-collector-kernel-collector.fullname" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +rules: +- apiGroups: + - policy + resourceNames: + - {{ include "opentelemetry-collector-kernel-collector.fullname" . }} + resources: + - podsecuritypolicies + verbs: + - use +{{- end -}} diff --git a/charts/opentelemetry-ebpf/templates/kernel-collector-clusterrolebinding.yaml b/charts/opentelemetry-ebpf/templates/kernel-collector-clusterrolebinding.yaml new file mode 100644 index 000000000..ca40a54f1 --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/kernel-collector-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "opentelemetry-collector-kernel-collector.fullname" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "opentelemetry-collector-kernel-collector.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ include "opentelemetry-collector-kernel-collector.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/opentelemetry-ebpf/templates/kernel-collector-daemonset.yaml b/charts/opentelemetry-ebpf/templates/kernel-collector-daemonset.yaml new file mode 100644 index 000000000..01505798f --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/kernel-collector-daemonset.yaml @@ -0,0 +1,200 @@ +{{- if .Values.kernelCollector.enabled }} +# kernel collector daemonset: deploys the kernel collector to each node in the cluster. +# The kernel collector needs to be able to compile and install +# eBPF programs in the node's kernel, so needs to run as root and +# needs to mount /lib/modules and /usr/src from the node itself. +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "opentelemetry-collector-kernel-collector.fullname" . 
}} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "opentelemetry-collector-kernel-collector.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + annotations: + charts.flowmill.com/version: {{ .Chart.Version }} + labels: + app.kubernetes.io/name: {{ include "opentelemetry-collector-kernel-collector.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} + containers: + - name: kernel-collector + {{- if .Values.kernelCollector.image.tag }} + image: "{{ default .Values.image.registry .Values.kernelCollector.image.registry }}/{{ .Values.kernelCollector.image.name }}:{{ .Values.kernelCollector.image.tag }}" + {{- else }} + image: "{{ default .Values.image.registry .Values.kernelCollector.image.registry }}/{{ .Values.kernelCollector.image.name }}:{{ default .Chart.AppVersion .Values.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - --config-file=/etc/network-explorer/config.yaml +{{- if .Values.kernelCollector.bpfDumpFile }} + - --bpf-dump-file={{ .Values.kernelCollector.bpfDumpFile }} +{{- end }} +{{- if (default false .Values.kernelCollector.useDockerMetadata) }} + - --force-docker-metadata +{{- end }} +{{- if not (default false .Values.kernelCollector.collectNomadMetadata) }} + - --disable-nomad-metadata +{{- end }} +{{- if .Values.kernelCollector.userlandTcp }} + - --enable-userland-tcp +{{- end }} +{{- if .Values.log.console }} + - --log-console +{{- end }} +{{- if hasKey .Values.log "level" }} + - --{{ .Values.log.level }} +{{- end }} +{{- if hasKey .Values.kernelCollector "log" }} +{{- if hasKey .Values.kernelCollector.log "whitelist" }} +{{- if hasKey .Values.kernelCollector.log.whitelist "agentLog" }} + - --log-whitelist-agent-log={{ .Values.kernelCollector.log.whitelist.agentLog }} +{{- end }} +{{- if hasKey .Values.kernelCollector.log.whitelist "channel" }} + - --log-whitelist-channel={{ .Values.kernelCollector.log.whitelist.channel }} +{{- end }} +{{- if hasKey .Values.kernelCollector.log.whitelist "component" }} + - --log-whitelist-component={{ .Values.kernelCollector.log.whitelist.component }} +{{- end }} +{{- if hasKey .Values.kernelCollector.log.whitelist "cloudPlatform" }} + - --log-whitelist-cloud-platform={{ .Values.kernelCollector.log.whitelist.cloudPlatform }} +{{- end }} +{{- end }} +{{- end }} +{{- if .Values.kernelCollector.additionalArgs }} +{{ toYaml .Values.kernelCollector.additionalArgs | indent 12 }} +{{- end }} + # TODO: liveness probe + env: + - name: "EBPF_NET_CLUSTER_NAME" + value: {{ tpl (.Values.clusterName | toYaml) . }} +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.enabled) }} + - name: "EBPF_NET_DEBUG_MODE" + value: "true" +{{- end }} +{{- end }} + - name: "EBPF_NET_DISABLE_HTTP_METRICS" + value: {{ quote (default false .Values.kernelCollector.disableHttpMetrics) }} +{{- if (default false .Values.kernelCollector.runUnderGDB) }} + - name: "EBPF_NET_RUN_UNDER_GDB" + value: "gdb" +{{- end }} + - name: "EBPF_NET_KERNEL_HEADERS_AUTO_FETCH" + value: {{ quote (default true .Values.kernelCollector.fetchKernelHeaders) }} + - name: "EBPF_NET_INTAKE_HOST" + value: {{ include "opentelemetry-collector-reducer.fullname" . 
}} + - name: "EBPF_NET_INTAKE_PORT" + value: "{{ default 7000 .Values.reducer.service.ports.telemetry.port }}" +{{- if .Values.kernelCollector.exportBpfSrcFile }} + - name: "EBPF_NET_EXPORT_BPF_SRC_FILE" + value: {{ quote .Values.kernelCollector.exportBpfSrcFile }} +{{- end }} + - name: "EBPF_NET_HOST_DIR" + value: "/hostfs" +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.storeMinidump) }} + - name: "EBPF_NET_MINIDUMP_DIR" + value: "/hostfs/var/run/network-explorer/minidump" +{{- end }} +{{- end }} +{{- if and .Values.debug.enabled .Values.debug.sendUnplannedExitMetric }} + - name: "EBPF_NET_CRASH_METRIC_HOST" + value: {{ .Values.endpoint.address }} + - name: "EBPF_NET_CRASH_METRIC_PORT" + value: {{ .Values.endpoint.port }} +{{- end }} +{{- if .Values.kernelCollector.env }} +{{ toYaml .Values.kernelCollector.env | indent 12 }} +{{- end }} +{{- if .Values.kernelCollector.resources }} + resources: +{{ toYaml .Values.kernelCollector.resources | indent 12 }} +{{- end }} + securityContext: + privileged: true + volumeMounts: + - mountPath: /hostfs/ + name: host-root + readOnly: true +{{- if (default true .Values.kernelCollector.fetchKernelHeaders) }} + - mountPath: /hostfs/var/cache + name: host-var-cache + readOnly: false +{{- end }} + - mountPath: /etc/network-explorer + name: {{ include "opentelemetry-ebpf.fullname" . }}-config + readOnly: true +{{- if (default false .Values.kernelCollector.useDockerMetadata) }} + - mountPath: /var/run/docker.sock + name: docker-sock + readOnly: false +{{- end }} +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.storeMinidump) }} + - mountPath: /hostfs/var/run/network-explorer + name: host-var-run-network-explorer + readOnly: false +{{- end }} +{{- end }} + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + hostPID: true + serviceAccountName: {{ include "opentelemetry-collector-kernel-collector.serviceAccountName" . }} + volumes: + - name: {{ include "opentelemetry-ebpf.fullname" . }}-config + projected: + sources: + - configMap: + name: {{ include "opentelemetry-ebpf.fullname" . }}-config + items: + - key: config.yaml + path: config.yaml + - name: host-root + hostPath: + path: / + type: Directory +{{- if (default true .Values.kernelCollector.fetchKernelHeaders) }} + - name: host-var-cache + hostPath: + path: /var/cache + type: DirectoryOrCreate +{{- end }} +{{- if hasKey .Values "debug" }} +{{- if (default false .Values.debug.storeMinidump) }} + - name: host-var-run-network-explorer + hostPath: + path: /var/run/network-explorer + type: DirectoryOrCreate +{{- end }} +{{- end }} +{{- if (default false .Values.kernelCollector.useDockerMetadata) }} + - name: docker-sock + hostPath: + path: /var/run/docker.sock + type: Socket +{{- end }} + {{- with .Values.kernelCollector.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.kernelCollector.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.kernelCollector.tolerations }} + tolerations: +{{ toYaml . 
| indent 8 }} + {{- end }} +{{- end }} diff --git a/charts/opentelemetry-ebpf/templates/kernel-collector-serviceaccount.yaml b/charts/opentelemetry-ebpf/templates/kernel-collector-serviceaccount.yaml new file mode 100644 index 000000000..456136d5b --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/kernel-collector-serviceaccount.yaml @@ -0,0 +1,8 @@ +{{- if and .Values.kernelCollector.enabled .Values.kernelCollector.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "opentelemetry-collector-kernel-collector.serviceAccountName" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +{{- end -}} diff --git a/charts/opentelemetry-ebpf/templates/reducer-deployment.yaml b/charts/opentelemetry-ebpf/templates/reducer-deployment.yaml new file mode 100644 index 000000000..6a704aa49 --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/reducer-deployment.yaml @@ -0,0 +1,125 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "opentelemetry-collector-reducer.fullname" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +spec: + replicas: 1 + strategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: {{ include "opentelemetry-collector-reducer.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "opentelemetry-collector-reducer.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{- range .Values.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} + containers: + - name: reducer + {{- if .Values.reducer.image.tag }} + image: "{{ default .Values.image.registry .Values.reducer.image.registry }}/{{ .Values.reducer.image.name }}:{{ .Values.reducer.image.tag }}" + {{- else }} + image: "{{ default .Values.image.registry .Values.reducer.image.registry }}/{{ .Values.reducer.image.name }}:{{ default .Chart.AppVersion .Values.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - --port={{- .Values.reducer.service.ports.telemetry.containerPort }} + - --log-console + - --no-log-file + {{- if hasKey .Values.log "level" }} + - --{{ .Values.log.level }} + {{- end }} + - --enable-aws-enrichment + - --disable-prometheus-metrics + - --enable-otlp-grpc-metrics + - --otlp-grpc-metrics-host={{ .Values.endpoint.address }} + - --otlp-grpc-metrics-port={{ .Values.endpoint.port }} + {{- if .Values.reducer.disableMetrics }} + - --disable-metrics={{join "," .Values.reducer.disableMetrics}} + {{- end }} + {{- if .Values.reducer.enableMetrics }} + - --enable-metrics={{join "," .Values.reducer.enableMetrics}} + {{- end }} + {{- if .Values.reducer.ingestShards }} + - --num-ingest-shards={{- .Values.reducer.ingestShards }} + {{- end }} + {{- if .Values.reducer.matchingShards }} + - --num-matching-shards={{- .Values.reducer.matchingShards }} + {{- end }} + {{- if .Values.reducer.aggregationShards }} + - --num-aggregation-shards={{- .Values.reducer.aggregationShards }} + {{- end }} + {{- if .Values.reducer.extraArgs }} + {{- toYaml .Values.reducer.extraArgs | nindent 12 }} + {{- end }} + {{- if .Values.debug.enabled }} + env: + {{- if (default false .Values.debug.storeMinidump) }} + - name: "EBPF_NET_MINIDUMP_DIR" + value: "/hostfs/var/run/network-explorer/minidump" + {{- end }} + {{- if .Values.debug.sendUnplannedExitMetric }} + - name: "EBPF_NET_CRASH_METRIC_HOST" + value: {{ 
.Values.endpoint.address }} + - name: "EBPF_NET_CRASH_METRIC_PORT" + value: {{ .Values.endpoint.port }} + {{- end }} + {{- end }} + ports: + - name: telemetry + containerPort: {{ .Values.reducer.service.ports.telemetry.containerPort }} + protocol: TCP + {{- if not .Values.reducer.disableInternalMetrics }} + - name: stats + containerPort: {{ .Values.reducer.service.ports.stats.containerPort }} + protocol: TCP + {{- end }} + {{- if .Values.debug.enabled }} + volumeMounts: + {{- if (default false .Values.debug.storeMinidump) }} + - mountPath: /hostfs/var/run/network-explorer + name: host-var-run-network-explorer + readOnly: false + {{- end }} + {{- end }} + readinessProbe: + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 5 + exec: + command: ['/srv/health_check.sh', 'readiness_probe', 'localhost', {{ quote .Values.reducer.service.ports.telemetry.containerPort }}] + {{- if .Values.resources }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- with .Values.reducer.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.reducer.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.reducer.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.debug.enabled }} + volumes: + {{- if (default false .Values.debug.storeMinidump) }} + - name: host-var-run-network-explorer + hostPath: + path: /var/run/network-explorer + type: DirectoryOrCreate + {{- end }} + {{- end }} diff --git a/charts/opentelemetry-ebpf/templates/reducer-service.yaml b/charts/opentelemetry-ebpf/templates/reducer-service.yaml new file mode 100644 index 000000000..3166dd330 --- /dev/null +++ b/charts/opentelemetry-ebpf/templates/reducer-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "opentelemetry-collector-reducer.fullname" . }} + labels: + {{- include "opentelemetry-ebpf.labels" . | nindent 4 }} +spec: + type: {{ .Values.reducer.service.type }} + selector: + app.kubernetes.io/name: {{ include "opentelemetry-collector-reducer.fullname" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- $ports := include "opentelemetry-collector-reducer.servicePortsConfig" . 
}} + {{- if $ports }} + ports: + {{- $ports | nindent 4}} + {{- end }} diff --git a/charts/opentelemetry-ebpf/values.schema.json b/charts/opentelemetry-ebpf/values.schema.json new file mode 100644 index 000000000..e77a22c89 --- /dev/null +++ b/charts/opentelemetry-ebpf/values.schema.json @@ -0,0 +1,163 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "title": "Values", + "additionalProperties": false, + "properties": { + "global": { + "type": "object" + }, + "nameOverride": { + "description": "Override name of the chart used in Kubernetes object names.", + "type": "string" + }, + "fullnameOverride": { + "description": "Override fully qualified app name.", + "type": "string" + }, + "clusterName": { + "description": "Cluster Name", + "type": "string" + }, + "endpoint": { + "type": "object", + "additionalProperties": false, + "required": [ + "address" + ], + "properties": { + "address": { + "type": "string" + }, + "port": { + "type": "integer" + } + } + }, + "resources": { + "type": "object", + "additionalProperties": false, + "properties": { + "limits": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": [ + "string", + "integer" + ] + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "additionalProperties": false, + "properties": { + "cpu": { + "type": [ + "string", + "integer" + ] + }, + "memory": { + "type": "string" + } + } + } + } + }, + "image": { + "description": "images", + "type": "object", + "additionalProperties": false, + "properties": { + "tag": { + "type": "string" + }, + "registry": { + "type": "string" + }, + "pullPolicy": { + "type": "string", + "enum": [ + "IfNotPresent", + "Always", + "Never" + ] + } + } + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "string" + } + }, + "log": { + "type": "object", + "additionalProperties": false, + "properties": { + "console": { + "type": "boolean" + }, + "level": { + "type": "string", + "enum": [ + "error", + "warning", + "info", + "debug", + "trace" + ] + } + } + }, + "debug": { + "type": "object", + "additionalProperties": false, + "properties": { + "enabled": { + "type": "boolean" + }, + "storeMinidump": { + "type": "boolean" + }, + "sendUnplannedExitMetric": { + "type": "boolean" + } + } + }, + "kernelCollector": { + "type": "object", + "additionalProperties": true + }, + "cloudCollector": { + "type": "object", + "additionalProperties": true + }, + "k8sCollector": { + "type": "object", + "additionalProperties": true + }, + "reducer": { + "type": "object", + "additionalProperties": true + }, + "rbac": { + "type": "object", + "additionalProperties": false, + "properties": { + "create": { + "type": "boolean" + } + } + } + }, + "required": [ + "clusterName" + ] +} \ No newline at end of file diff --git a/charts/opentelemetry-ebpf/values.yaml b/charts/opentelemetry-ebpf/values.yaml new file mode 100644 index 000000000..c0bd9fabb --- /dev/null +++ b/charts/opentelemetry-ebpf/values.yaml @@ -0,0 +1,275 @@ +# Default values for opentelemetry-ebpf. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
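+# Note: per values.schema.json (added above), clusterName is required, and
+# endpoint.address is required within the endpoint block; both default to empty
+# strings below and are normally overridden at install time.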
+ +nameOverride: "" +fullnameOverride: "" +clusterName: "" + +image: + tag: "" + registry: otel + pullPolicy: IfNotPresent + +imagePullSecrets: [] + +resources: {} + +# OTLP gRPC endpoint to send the collected metrics +endpoint: + address: "" + port: 4317 + +log: + console: false + # possible values: { error | warning | info | debug | trace } + level: warning + +debug: + enabled: false + storeMinidump: false + sendUnplannedExitMetric: false + +kernelCollector: + enabled: true + serviceAccount: + create: true + name: "" + image: + registry: "" + tag: "" + name: opentelemetry-ebpf-kernel-collector + + nodeSelector: {} + disableHttpMetrics: false + + tolerations: + - operator: "Exists" + effect: "NoExecute" + - operator: "Exists" + effect: "NoSchedule" + + affinity: {} + resources: {} + + # uncomment the line below to disable automatic kernel headers fetching + # fetchKernelHeaders: false + + # uncomment to enable enrichment using Docker metadata + # useDockerMetadata: true + + # uncomment to enable enrichment using Nomad metadata (https://www.nomadproject.io/) + # collectNomadMetadata: true + +cloudCollector: + enabled: false + image: + registry: "" + tag: "" + name: opentelemetry-ebpf-cloud-collector + + serviceAccount: + create: true + name: "" + annotations: {} + ## eks.amazonaws.com/role-arn: "role-arn-name" + + tolerations: [] + affinity: {} + +k8sCollector: + enabled: true + serviceAccount: + create: true + name: "" + relay: + image: + registry: "" + tag: "" + name: opentelemetry-ebpf-k8s-relay + watcher: + image: + registry: "" + tag: "" + name: opentelemetry-ebpf-k8s-watcher + + tolerations: [] + affinity: {} + +reducer: + image: + registry: "" + tag: "" + name: opentelemetry-ebpf-reducer + extraArgs: {} + ingestShards: 1 + matchingShards: 1 + aggregationShards: 1 + disableInternalMetrics: true + disableMetrics: [] + ### to disable an entire metric category: ### + # - tcp.all + # - udp.all + # - dns.all + # - http.all + ### to disable an individual metric: ### + ### tcp ### + # - tcp.bytes + # - tcp.rtt.num_measurements + # - tcp.active + # - tcp.rtt.average + # - tcp.packets + # - tcp.retrans + # - tcp.syn_timeouts + # - tcp.new_sockets + # - tcp.resets + ### udp ### + # - udp.bytes + # - udp.packets + # - udp.active + # - udp.drops + ### dns ### + # - dns.client.duration.average + # - dns.server.duration.average + # - dns.active_sockets + # - dns.responses + # - dns.timeouts + ### http ## + # - http.client.duration.average + # - http.server.duration.average + # - http.active_sockets + # - http.status_code + ### ebpf_net ## + # - ebpf_net.span_utilization_fraction + # - ebpf_net.pipeline_metric_bytes_discarded + # - ebpf_net.codetiming_min_ns + # - ebpf_net.entrypoint_info + # - ebpf_net.otlp_grpc.requests_sent + # - ebpf_net.connections + # - ebpf_net.rpc_queue_elem_utilization_fraction + # - ebpf_net.disconnects + # - ebpf_net.codetiming_avg_ns + # - ebpf_net.client_handle_pool + # - ebpf_net.otlp_grpc.successful_requests + # - ebpf_net.span_utilization + # - ebpf_net.up + # - ebpf_net.rpc_queue_buf_utilization_fraction + # - ebpf_net.collector_log_count + # - ebpf_net.time_since_last_message_ns + # - ebpf_net.bpf_log + # - ebpf_net.codetiming_count + # - ebpf_net.message + # - ebpf_net.otlp_grpc.bytes_sent + # - ebpf_net.pipeline_message_error + # - ebpf_net.pipeline_metric_bytes_written + # - ebpf_net.codetiming_max_ns + # - ebpf_net.span_utilization_max + # - ebpf_net.client_handle_pool_fraction + # - ebpf_net.span_utilization_fraction + # - ebpf_net.rpc_latency_ns + # - 
ebpf_net.agg_root_truncation + # - ebpf_net.clock_offset_ns + # - ebpf_net.otlp_grpc.metrics_sent + # - ebpf_net.otlp_grpc.unknown_response_tags + # - ebpf_net.collector_health + # - ebpf_net.codetiming_sum_ns + # - ebpf_net.otlp_grpc.failed_requests + # - ebpf_net.rpc_queue_buf_utilization + ### to enable all metrics (including metrics turned off by default): ### + # - none + enableMetrics: [] + ### Disable metrics flag is evaluated first and only then enable metric flag is evaluated. ### + ### to enable an entire metric category: ### + # - tcp.all + # - udp.all + # - dns.all + # - http.all + # - ebpf_net.all + ### to enable an individual metric: ### + ### tcp ### + # - tcp.bytes + # - tcp.rtt.num_measurements + # - tcp.active + # - tcp.rtt.average + # - tcp.packets + # - tcp.retrans + # - tcp.syn_timeouts + # - tcp.new_sockets + # - tcp.resets + ### udp ### + # - udp.bytes + # - udp.packets + # - udp.active + # - udp.drops + ### dns ### + # - dns.client.duration.average + # - dns.server.duration.average + # - dns.active_sockets + # - dns.responses + # - dns.timeouts + ### http ### + # - http.client.duration.average + # - http.server.duration.average + # - http.active_sockets + # - http.status_code + ### ebpf_net ### + # - ebpf_net.span_utilization_fraction + # - ebpf_net.pipeline_metric_bytes_discarded + # - ebpf_net.codetiming_min_ns + # - ebpf_net.entrypoint_info + # - ebpf_net.otlp_grpc.requests_sent + # - ebpf_net.connections + # - ebpf_net.rpc_queue_elem_utilization_fraction + # - ebpf_net.disconnects + # - ebpf_net.codetiming_avg_ns + # - ebpf_net.client_handle_pool + # - ebpf_net.otlp_grpc.successful_requests + # - ebpf_net.span_utilization + # - ebpf_net.up + # - ebpf_net.rpc_queue_buf_utilization_fraction + # - ebpf_net.collector_log_count + # - ebpf_net.time_since_last_message_ns + # - ebpf_net.bpf_log + # - ebpf_net.codetiming_count + # - ebpf_net.message + # - ebpf_net.otlp_grpc.bytes_sent + # - ebpf_net.pipeline_message_error + # - ebpf_net.pipeline_metric_bytes_written + # - ebpf_net.codetiming_max_ns + # - ebpf_net.span_utilization_max + # - ebpf_net.client_handle_pool_fraction + # - ebpf_net.span_utilization_fraction + # - ebpf_net.rpc_latency_ns + # - ebpf_net.agg_root_truncation + # - ebpf_net.clock_offset_ns + # - ebpf_net.otlp_grpc.metrics_sent + # - ebpf_net.otlp_grpc.unknown_response_tags + # - ebpf_net.collector_health + # - ebpf_net.codetiming_sum_ns + # - ebpf_net.otlp_grpc.failed_requests + # - ebpf_net.rpc_queue_buf_utilization + + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + service: + type: ClusterIP + ports: + telemetry: + enabled: true + servicePort: 7000 + containerPort: 7000 + targetPort: 7000 + protocol: TCP + appProtocol: http + stats: + enabled: true + servicePort: 7001 + containerPort: 7001 + targetPort: 7001 + protocol: TCP + appProtocol: http + +rbac: + create: true
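For reference, a minimal override for the values above might look like the following sketch; the cluster name and collector address are hypothetical placeholders, and only keys defined in this chart's values.yaml are used:

  clusterName: my-cluster
  endpoint:
    address: otel-collector.default.svc.cluster.local
    port: 4317
  kernelCollector:
    useDockerMetadata: true
  reducer:
    ingestShards: 2
    matchingShards: 2
    aggregationShards: 2

Installed with, e.g., helm install <release> charts/opentelemetry-ebpf -f overrides.yaml, this would point the reducer's OTLP gRPC export at endpoint.address:4317, add --force-docker-metadata and the docker.sock mount to the kernel collector DaemonSet, and start the reducer with --num-ingest-shards=2, --num-matching-shards=2 and --num-aggregation-shards=2. The collectors reach the reducer on the telemetry service port, which defaults to 7000.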