diff --git a/.github/workflows/operator-test.yaml b/.github/workflows/operator-test.yaml index a23ca5740..ce11449fe 100644 --- a/.github/workflows/operator-test.yaml +++ b/.github/workflows/operator-test.yaml @@ -45,7 +45,7 @@ jobs: LOCAL_TARGET_ALLOCATOR_IMG="local\/opentelemetry-operator-targetallocator:e2e" PUBLIC_TARGET_ALLOCATOR_IMG="ghcr.io\/open-telemetry\/opentelemetry-operator\/target-allocator:0.1.0" - sed -i "s/$LOCAL_TARGET_ALLOCATOR_IMG/${PUBLIC_TARGET_ALLOCATOR_IMG}/g" ./opentelemetry-operator/tests/e2e/smoke-targetallocator/00-install.yaml + sed -i "s/$LOCAL_TARGET_ALLOCATOR_IMG/${PUBLIC_TARGET_ALLOCATOR_IMG}/g" ./opentelemetry-operator/tests/e2e/smoke-targetallocator/*.yaml sed -i "s/$LOCAL_TARGET_ALLOCATOR_IMG/${PUBLIC_TARGET_ALLOCATOR_IMG}/g" ./opentelemetry-operator/tests/e2e/targetallocator-features/00-install.yaml sed -i "s/$LOCAL_TARGET_ALLOCATOR_IMG/${PUBLIC_TARGET_ALLOCATOR_IMG}/g" ./opentelemetry-operator/tests/e2e/prometheus-config-validation/*.yaml diff --git a/charts/opentelemetry-operator/Chart.yaml b/charts/opentelemetry-operator/Chart.yaml index 29103a3f1..62191d7da 100644 --- a/charts/opentelemetry-operator/Chart.yaml +++ b/charts/opentelemetry-operator/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: opentelemetry-operator -version: 0.34.1 +version: 0.35.0 description: OpenTelemetry Operator Helm chart for Kubernetes type: application home: https://opentelemetry.io/ @@ -11,4 +11,4 @@ maintainers: - name: dmitryax - name: TylerHelmuth icon: https://raw.githubusercontent.com/cncf/artwork/a718fa97fffec1b9fd14147682e9e3ac0c8817cb/projects/opentelemetry/icon/color/opentelemetry-icon-color.png -appVersion: 0.81.0 +appVersion: 0.82.0 diff --git a/charts/opentelemetry-operator/crds/crd-opentelemetrycollector.yaml b/charts/opentelemetry-operator/crds/crd-opentelemetrycollector.yaml index 4e925e612..04d9bdc2c 100644 --- a/charts/opentelemetry-operator/crds/crd-opentelemetrycollector.yaml +++ b/charts/opentelemetry-operator/crds/crd-opentelemetrycollector.yaml @@ -35,6 +35,10 @@ spec: - jsonPath: .status.image name: Image type: string + - description: Management State + jsonPath: .spec.managementState + name: Management + type: string name: v1alpha1 schema: openAPIV3Schema: @@ -2918,6 +2922,14 @@ spec: format: int32 type: integer type: object + managementState: + default: managed + description: ManagementState defines if the CR should be managed by + the operator or not. Default is managed. + enum: + - managed + - unmanaged + type: string maxReplicas: description: 'MaxReplicas sets an upper bound to the autoscaling feature. If MaxReplicas is set autoscaling is enabled. Deprecated: use "OpenTelemetryCollector.Spec.Autoscaler.MaxReplicas" @@ -2946,6 +2958,19 @@ spec: This is only relevant to daemonset, statefulset, and deployment mode type: object + observability: + description: ObservabilitySpec defines how telemetry data gets handled. + properties: + metrics: + description: Metrics defines the metrics configuration for operands. + properties: + enableMetrics: + description: EnableMetrics specifies if ServiceMonitors should + be created for the OpenTelemetry Collector. The operator.observability.prometheus + feature gate must be enabled to use this feature. + type: boolean + type: object + type: object podAnnotations: additionalProperties: type: string @@ -3424,6 +3449,122 @@ spec: description: Enabled indicates whether to use a target allocation mechanism for Prometheus targets or not. 
type: boolean + env: + description: ENV vars to set on the OpenTelemetry TargetAllocator's + Pods. These can then in certain cases be consumed in the config + file for the TargetAllocator. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels[''<KEY>'']`, + `metadata.annotations[''<KEY>'']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?'
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array filterStrategy: description: FilterStrategy determines how to filter targets before allocating them among the collectors. The only current option @@ -3434,6 +3575,12 @@ spec: description: Image indicates the container image to use for the OpenTelemetry TargetAllocator. type: string + nodeSelector: + additionalProperties: + type: string + description: NodeSelector to schedule OpenTelemetry TargetAllocator + pods. + type: object prometheusCR: description: PrometheusCR defines the configuration for the retrieval of PrometheusOperator CRDs ( servicemonitor.monitoring.coreos.com/v1 @@ -3453,6 +3600,12 @@ spec: the map is going to exactly match a label in a PodMonitor's meta labels. The requirements are ANDed. type: object + scrapeInterval: + default: 30s + description: "Interval between consecutive scrapes. Equivalent + to the same setting on the Prometheus CRD. \n Default: \"30s\"" + format: duration + type: string serviceMonitorSelector: additionalProperties: type: string @@ -3525,6 +3678,188 @@ spec: service account to use with this instance. When set, the operator will not automatically create a ServiceAccount for the TargetAllocator. type: string + topologySpreadConstraints: + description: TopologySpreadConstraints embedded kubernetes pod + configuration option, controls how pods are spread across your + cluster among failure-domains such as regions, zones, nodes, + and other user-defined topology domains https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine + the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys + to select the pods over which spreading will be calculated. 
+ The keys are used to lookup values from the incoming pod + labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading + will be calculated for the incoming pod. The same key + is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't + set. Keys that don't exist in the incoming pod labels + will be ignored. A null or empty list means only match + against labelSelector. \n This is a beta field and requires + the MatchLabelKeysInPodTopologySpread feature gate to + be enabled (enabled by default)." + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods + may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global + minimum. The global minimum is the minimum number of matching + pods in an eligible domain or zero if the number of eligible + domains is less than MinDomains. For example, in a 3-zone + cluster, MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | | P P | P P | P | - + if MaxSkew is 1, incoming pod can only be scheduled to + zone3 to become 2/2/2; scheduling it onto zone1(zone2) + would make the ActualSkew(3-1) on zone1(zone2) violate + MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled + onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that + satisfy it. It''s a required field. Default value is 1 + and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation + of Skew is performed. And when the number of eligible + domains with matching topology keys equals or greater + than minDomains, this value has no effect on scheduling. + As a result, when the number of eligible domains is less + than minDomains, scheduler won't schedule more than maxSkew + Pods to those domains. If value is nil, the constraint + behaves as if MinDomains is equal to 1. Valid values are + integers greater than 0. When value is not nil, WhenUnsatisfiable + must be DoNotSchedule. \n For example, in a 3-zone cluster, + MaxSkew is set to 2, MinDomains is set to 5 and pods with + the same labelSelector spread as 2/2/2: | zone1 | zone2 + | zone3 | | P P | P P | P P | The number of domains + is less than 5(MinDomains), so \"global minimum\" is treated + as 0. In this situation, new pod with the same labelSelector + cannot be scheduled, because computed skew will be 3(3 + - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. \n This is a beta field and requires + the MinDomainsInPodTopologySpread feature gate to be enabled + (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat + Pod's nodeAffinity/nodeSelector when calculating pod topology + spread skew. Options are: - Honor: only nodes matching + nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes + are included in the calculations. 
\n If this value is + nil, the behavior is equivalent to the Honor policy. This + is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat + node taints when calculating pod topology spread skew. + Options are: - Honor: nodes without taints, along with + tainted nodes for which the incoming pod has a toleration, + are included. - Ignore: node taints are ignored. All nodes + are included. \n If this value is nil, the behavior is + equivalent to the Ignore policy. This is a beta-level + feature default enabled by the NodeInclusionPolicyInPodTopologySpread + feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes + that have a label with this key and identical values are + considered to be in the same topology. We consider each + <key, value> as a "bucket", and try to put balanced number + of pods into each bucket. We define a domain as a particular + instance of a topology. Also, we define an eligible domain + as a domain whose nodes meet the requirements of nodeAffinityPolicy + and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain + of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with + a pod if it doesn''t satisfy the spread constraint. - + DoNotSchedule (default) tells the scheduler not to schedule + it. - ScheduleAnyway tells the scheduler to schedule the + pod in any location, but giving higher precedence to topologies + that would help reduce the skew. A constraint is considered + "Unsatisfiable" for an incoming pod if and only if every + possible node assignment for that pod would violate "MaxSkew" + on some topology. For example, in a 3-zone cluster, MaxSkew + is set to 1, and pods with the same labelSelector spread + as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming + pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) + as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). + In other words, the cluster can still be imbalanced, but + scheduler won''t make it *more* imbalanced. It''s a required + field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array type: object terminationGracePeriodSeconds: description: Duration in seconds the pod needs to terminate gracefully @@ -3573,6 +3908,182 @@ spec: type: string type: object type: array + topologySpreadConstraints: + description: TopologySpreadConstraints embedded kubernetes pod configuration + option, controls how pods are spread across your cluster among failure-domains + such as regions, zones, nodes, and other user-defined topology domains + https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + This is only relevant to statefulset, and deployment mode + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods + that match this label selector are counted to determine the + number of pods in their corresponding topology domain.
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys to select + the pods over which spreading will be calculated. The keys + are used to lookup values from the incoming pod labels, those + key-value labels are ANDed with labelSelector to select the + group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in + both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot + be set when LabelSelector isn't set. Keys that don't exist + in the incoming pod labels will be ignored. A null or empty + list means only match against labelSelector. \n This is a + beta field and requires the MatchLabelKeysInPodTopologySpread + feature gate to be enabled (enabled by default)." + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may + be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, + it is the maximum permitted difference between the number + of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods + in an eligible domain or zero if the number of eligible domains + is less than MinDomains. For example, in a 3-zone cluster, + MaxSkew is set to 1, and pods with the same labelSelector + spread as 2/2/1: In this case, the global minimum is 1. | + zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew + is 1, incoming pod can only be scheduled to zone3 to become + 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) + on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming + pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, + it is used to give higher precedence to topologies that satisfy + it. It''s a required field. Default value is 1 and 0 is not + allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible + domains. When the number of eligible domains with matching + topology keys is less than minDomains, Pod Topology Spread + treats \"global minimum\" as 0, and then the calculation of + Skew is performed. 
And when the number of eligible domains + with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. As a result, when + the number of eligible domains is less than minDomains, scheduler + won't schedule more than maxSkew Pods to those domains. If + value is nil, the constraint behaves as if MinDomains is equal + to 1. Valid values are integers greater than 0. When value + is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For + example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains + is set to 5 and pods with the same labelSelector spread as + 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | + The number of domains is less than 5(MinDomains), so \"global + minimum\" is treated as 0. In this situation, new pod with + the same labelSelector cannot be scheduled, because computed + skew will be 3(3 - 0) if new Pod is scheduled to any of the + three zones, it will violate MaxSkew. \n This is a beta field + and requires the MinDomainsInPodTopologySpread feature gate + to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat + Pod's nodeAffinity/nodeSelector when calculating pod topology + spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector + are included in the calculations. - Ignore: nodeAffinity/nodeSelector + are ignored. All nodes are included in the calculations. \n + If this value is nil, the behavior is equivalent to the Honor + policy. This is a beta-level feature default enabled by the + NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node + taints when calculating pod topology spread skew. Options + are: - Honor: nodes without taints, along with tainted nodes + for which the incoming pod has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + \n If this value is nil, the behavior is equivalent to the + Ignore policy. This is a beta-level feature default enabled + by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that + have a label with this key and identical values are considered + to be in the same topology. We consider each + <key, value> as a "bucket", and try to put balanced number of pods into + each bucket. We define a domain as a particular instance of + a topology. Also, we define an eligible domain as a domain + whose nodes meet the requirements of nodeAffinityPolicy and + nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", + each Node is a domain of that topology. And, if TopologyKey + is "topology.kubernetes.io/zone", each zone is a domain of + that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a + pod if it doesn''t satisfy the spread constraint. - DoNotSchedule + (default) tells the scheduler not to schedule it. - ScheduleAnyway + tells the scheduler to schedule the pod in any location, but + giving higher precedence to topologies that would help reduce + the skew. A constraint is considered "Unsatisfiable" for an + incoming pod if and only if every possible node assignment + for that pod would violate "MaxSkew" on some topology.
For + example, in a 3-zone cluster, MaxSkew is set to 1, and pods + with the same labelSelector spread as 3/1/1: | zone1 | zone2 + | zone3 | | P P P | P | P | If WhenUnsatisfiable is + set to DoNotSchedule, incoming pod can only be scheduled to + zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on + zone2(zone3) satisfies MaxSkew(1). In other words, the cluster + can still be imbalanced, but scheduler won''t make it *more* + imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array upgradeStrategy: description: UpgradeStrategy represents how the operator will handle upgrades to the CR when a newer version of the operator is deployed diff --git a/charts/opentelemetry-operator/examples/default/rendered/admission-webhooks/operator-webhook-with-cert-manager.yaml b/charts/opentelemetry-operator/examples/default/rendered/admission-webhooks/operator-webhook-with-cert-manager.yaml index 4de462f8b..43a6a33e9 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/admission-webhooks/operator-webhook-with-cert-manager.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/admission-webhooks/operator-webhook-with-cert-manager.yaml @@ -6,9 +6,9 @@ metadata: annotations: cert-manager.io/inject-ca-from: default/example-opentelemetry-operator-serving-cert labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: webhook @@ -85,9 +85,9 @@ metadata: annotations: cert-manager.io/inject-ca-from: default/example-opentelemetry-operator-serving-cert labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: webhook diff --git a/charts/opentelemetry-operator/examples/default/rendered/certmanager.yaml b/charts/opentelemetry-operator/examples/default/rendered/certmanager.yaml index 413081956..6c0a69f02 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/certmanager.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/certmanager.yaml @@ -4,9 +4,9 @@ apiVersion: cert-manager.io/v1 kind: Certificate metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: webhook @@ -29,9 +29,9 @@ apiVersion: cert-manager.io/v1 kind: Issuer metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: webhook diff --git a/charts/opentelemetry-operator/examples/default/rendered/clusterrole.yaml b/charts/opentelemetry-operator/examples/default/rendered/clusterrole.yaml index 16b6f75fc..ac00474ea 100644 --- 
a/charts/opentelemetry-operator/examples/default/rendered/clusterrole.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/clusterrole.yaml @@ -4,9 +4,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -201,9 +201,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -219,9 +219,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/clusterrolebinding.yaml b/charts/opentelemetry-operator/examples/default/rendered/clusterrolebinding.yaml index 16a91d823..5032c730d 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/clusterrolebinding.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/clusterrolebinding.yaml @@ -4,9 +4,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -25,9 +25,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/deployment.yaml b/charts/opentelemetry-operator/examples/default/rendered/deployment.yaml index b5bdb9f11..7128c2d39 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/deployment.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/deployment.yaml @@ -4,9 +4,9 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -33,13 +33,13 @@ spec: - --enable-leader-election - --health-probe-addr=:8081 - 
--webhook-port=9443 - - --collector-image=otel/opentelemetry-collector-contrib:0.81.0 + - --collector-image=otel/opentelemetry-collector-contrib:0.82.0 command: - /manager env: - name: ENABLE_WEBHOOKS value: "true" - image: "ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:v0.81.0" + image: "ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator:v0.82.0" name: manager ports: - containerPort: 8080 diff --git a/charts/opentelemetry-operator/examples/default/rendered/role.yaml b/charts/opentelemetry-operator/examples/default/rendered/role.yaml index 45e1b0cc2..509f70a36 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/role.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/role.yaml @@ -4,9 +4,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/rolebinding.yaml b/charts/opentelemetry-operator/examples/default/rendered/rolebinding.yaml index 048530150..e9688249f 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/rolebinding.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/rolebinding.yaml @@ -4,9 +4,9 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/service.yaml b/charts/opentelemetry-operator/examples/default/rendered/service.yaml index 2f3afe782..da4b1f2f6 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/service.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/service.yaml @@ -4,9 +4,9 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -31,9 +31,9 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/serviceaccount.yaml b/charts/opentelemetry-operator/examples/default/rendered/serviceaccount.yaml index 013c06744..34b725491 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/serviceaccount.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/serviceaccount.yaml @@ -6,9 +6,9 @@ metadata: name: opentelemetry-operator namespace: default labels: - 
helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/examples/default/rendered/tests/test-certmanager-connection.yaml b/charts/opentelemetry-operator/examples/default/rendered/tests/test-certmanager-connection.yaml index 21e7b0fe5..abc8920f1 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/tests/test-certmanager-connection.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/tests/test-certmanager-connection.yaml @@ -6,9 +6,9 @@ metadata: name: "example-opentelemetry-operator-cert-manager" namespace: default labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: webhook diff --git a/charts/opentelemetry-operator/examples/default/rendered/tests/test-service-connection.yaml b/charts/opentelemetry-operator/examples/default/rendered/tests/test-service-connection.yaml index fe8237bf3..1e410f80f 100644 --- a/charts/opentelemetry-operator/examples/default/rendered/tests/test-service-connection.yaml +++ b/charts/opentelemetry-operator/examples/default/rendered/tests/test-service-connection.yaml @@ -6,9 +6,9 @@ metadata: name: "example-opentelemetry-operator-metrics" namespace: default labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager @@ -43,9 +43,9 @@ metadata: name: "example-opentelemetry-operator-webhook" namespace: default labels: - helm.sh/chart: opentelemetry-operator-0.34.1 + helm.sh/chart: opentelemetry-operator-0.35.0 app.kubernetes.io/name: opentelemetry-operator - app.kubernetes.io/version: "0.81.0" + app.kubernetes.io/version: "0.82.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: example app.kubernetes.io/component: controller-manager diff --git a/charts/opentelemetry-operator/values.yaml b/charts/opentelemetry-operator/values.yaml index f5da78638..1d17a2742 100644 --- a/charts/opentelemetry-operator/values.yaml +++ b/charts/opentelemetry-operator/values.yaml @@ -29,10 +29,10 @@ pdb: manager: image: repository: ghcr.io/open-telemetry/opentelemetry-operator/opentelemetry-operator - tag: v0.81.0 + tag: v0.82.0 collectorImage: repository: otel/opentelemetry-collector-contrib - tag: 0.81.0 + tag: 0.82.0 targetAllocatorImage: repository: "" tag: ""
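
For reference, the new OpenTelemetryCollector spec fields introduced by this CRD update can be exercised together in a single custom resource. The sketch below is hypothetical and not part of the chart's rendered output: the resource name, label values, and pipeline wiring are illustrative, while the field names, enums, and defaults come from the CRD diff above. Per the field description, observability.metrics.enableMetrics additionally requires the operator.observability.prometheus feature gate to be enabled on the operator.

apiVersion: opentelemetry.io/v1alpha1
kind: OpenTelemetryCollector
metadata:
  name: example-collector            # hypothetical name
spec:
  mode: statefulset                  # top-level topologySpreadConstraints apply to statefulset and deployment modes only
  managementState: managed           # new; "unmanaged" tells the operator to stop reconciling this CR
  observability:
    metrics:
      enableMetrics: true            # new; creates ServiceMonitors for the collector (feature-gated)
  topologySpreadConstraints:         # new; spread collector pods across zones
  - maxSkew: 1
    topologyKey: topology.kubernetes.io/zone
    whenUnsatisfiable: ScheduleAnyway
    labelSelector:
      matchLabels:
        app.kubernetes.io/component: opentelemetry-collector   # hypothetical label
  targetAllocator:
    enabled: true
    nodeSelector:                    # new; constrains where target allocator pods are scheduled
      kubernetes.io/os: linux
    env:                             # new; env vars set on the target allocator pods
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    prometheusCR:
      enabled: true
      scrapeInterval: 60s            # new; defaults to "30s"
    topologySpreadConstraints:       # new; same TopologySpreadConstraint schema as above
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          app.kubernetes.io/component: opentelemetry-targetallocator   # hypothetical label
  config: |
    receivers:
      prometheus:
        config:
          scrape_configs: []
    exporters:
      logging: {}
    service:
      pipelines:
        metrics:
          receivers: [prometheus]
          exporters: [logging]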