From 0b1e7e74b9e1f31e12cb5799dae3988bf8445fa7 Mon Sep 17 00:00:00 2001 From: Calum Murray Date: Mon, 10 Jul 2023 14:49:03 -0400 Subject: [PATCH 01/11] Added kafka features config map to kafka broker docs --- .../broker-types/kafka-broker/README.md | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/docs/eventing/brokers/broker-types/kafka-broker/README.md b/docs/eventing/brokers/broker-types/kafka-broker/README.md index 1d6ee278b1..1e6ae624ab 100644 --- a/docs/eventing/brokers/broker-types/kafka-broker/README.md +++ b/docs/eventing/brokers/broker-types/kafka-broker/README.md @@ -257,6 +257,41 @@ spec: !!! note When using an external topic, the Knative Kafka Broker does not own the topic and is not responsible for managing the topic. This includes the topic lifecycle or its general validity. Other restrictions for general access to the topic may apply. See the documentation about using [Access Control Lists (ACLs)](https://kafka.apache.org/documentation/#security_authz). +## Configure Kafka features + +There are various kafka features/default values the Knative Kafka Broker uses when interacting with Kafka. You can configure these as follows: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-kafka-features + namespace: knative-eventing + data: + # Controls whether the dispatcher should use the rate limiter based on the number of virtual replicas. + # 1. Enabled: The rate limiter is applied. + # 2. Disabled: The rate limiter is not applied. + dispatcher.rate-limiter: "disabled" + # Controls whether the dispatcher should record additional metrics. + # 1. Enabled: The metrics are recorded. + # 2. Disabled: The metrics are not recorded. + dispatcher.ordered-executor-metrics: "disabled" + # Controls whether the controller should autoscale consumer resources with KEDA + # 1. Enabled: KEDA autoscaling of consumers will be setup. + # 2. Disabled: KEDA autoscaling of consumers will not be setup. + controller.autoscaler: "disabled"{% raw %} + # The Go text/template used to generate consumergroup ID for triggers. + # The template can reference the trigger Kubernetes metadata only. + triggers.consumergroup.template: "knative-trigger-{{ .Namespace }}-{{ .Name }}" + # The Go text/template used to generate topics for Brokers. + # The template can reference the broker Kubernetes metadata only. + brokers.topic.template: "knative-broker-{{ .Namespace }}-{{ .Name }}" + # The Go text/template used to generate topics for Channels. + # The template can reference the channel Kubernetes metadata only. + channels.topic.template: "knative-channel-{{ .Namespace }}-{{ .Name }}" + {% endraw %} +``` + ## Consumer Offsets Commit Interval Kafka consumers keep track of the last successfully sent events by committing offsets. From d28733625daff4d76b4d7903405a3ca6bde5f5e6 Mon Sep 17 00:00:00 2001 From: Calum Murray Date: Tue, 11 Jul 2023 14:20:16 -0400 Subject: [PATCH 02/11] Update docs/eventing/brokers/broker-types/kafka-broker/README.md Co-authored-by: Leo Li --- docs/eventing/brokers/broker-types/kafka-broker/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/eventing/brokers/broker-types/kafka-broker/README.md b/docs/eventing/brokers/broker-types/kafka-broker/README.md index 1e6ae624ab..367f64e677 100644 --- a/docs/eventing/brokers/broker-types/kafka-broker/README.md +++ b/docs/eventing/brokers/broker-types/kafka-broker/README.md @@ -272,6 +272,7 @@ metadata: # 1. Enabled: The rate limiter is applied. # 2. Disabled: The rate limiter is not applied. 
dispatcher.rate-limiter: "disabled" + # Controls whether the dispatcher should record additional metrics. # 1. Enabled: The metrics are recorded. # 2. Disabled: The metrics are not recorded. From ec26be3f4924ceb7c244db3e81389735c811f9df Mon Sep 17 00:00:00 2001 From: Calum Murray Date: Tue, 11 Jul 2023 14:20:21 -0400 Subject: [PATCH 03/11] Update docs/eventing/brokers/broker-types/kafka-broker/README.md Co-authored-by: Leo Li --- docs/eventing/brokers/broker-types/kafka-broker/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/eventing/brokers/broker-types/kafka-broker/README.md b/docs/eventing/brokers/broker-types/kafka-broker/README.md index 367f64e677..e960b16b40 100644 --- a/docs/eventing/brokers/broker-types/kafka-broker/README.md +++ b/docs/eventing/brokers/broker-types/kafka-broker/README.md @@ -277,6 +277,7 @@ metadata: # 1. Enabled: The metrics are recorded. # 2. Disabled: The metrics are not recorded. dispatcher.ordered-executor-metrics: "disabled" + # Controls whether the controller should autoscale consumer resources with KEDA # 1. Enabled: KEDA autoscaling of consumers will be setup. # 2. Disabled: KEDA autoscaling of consumers will not be setup. From 75c42fecbcafadb9713937a4a677c7dea4afee29 Mon Sep 17 00:00:00 2001 From: Calum Murray Date: Tue, 11 Jul 2023 14:20:30 -0400 Subject: [PATCH 04/11] Update docs/eventing/brokers/broker-types/kafka-broker/README.md Co-authored-by: Leo Li --- docs/eventing/brokers/broker-types/kafka-broker/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/eventing/brokers/broker-types/kafka-broker/README.md b/docs/eventing/brokers/broker-types/kafka-broker/README.md index e960b16b40..bf635c42cc 100644 --- a/docs/eventing/brokers/broker-types/kafka-broker/README.md +++ b/docs/eventing/brokers/broker-types/kafka-broker/README.md @@ -282,6 +282,7 @@ metadata: # 1. Enabled: KEDA autoscaling of consumers will be setup. # 2. Disabled: KEDA autoscaling of consumers will not be setup. controller.autoscaler: "disabled"{% raw %} + # The Go text/template used to generate consumergroup ID for triggers. # The template can reference the trigger Kubernetes metadata only. triggers.consumergroup.template: "knative-trigger-{{ .Namespace }}-{{ .Name }}" From 6edda0175ce33e75eb0ec4763e3248beb51395e6 Mon Sep 17 00:00:00 2001 From: Calum Murray Date: Tue, 11 Jul 2023 14:20:35 -0400 Subject: [PATCH 05/11] Update docs/eventing/brokers/broker-types/kafka-broker/README.md Co-authored-by: Leo Li --- docs/eventing/brokers/broker-types/kafka-broker/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/eventing/brokers/broker-types/kafka-broker/README.md b/docs/eventing/brokers/broker-types/kafka-broker/README.md index bf635c42cc..6344689f7b 100644 --- a/docs/eventing/brokers/broker-types/kafka-broker/README.md +++ b/docs/eventing/brokers/broker-types/kafka-broker/README.md @@ -289,6 +289,7 @@ metadata: # The Go text/template used to generate topics for Brokers. # The template can reference the broker Kubernetes metadata only. brokers.topic.template: "knative-broker-{{ .Namespace }}-{{ .Name }}" + # The Go text/template used to generate topics for Channels. # The template can reference the channel Kubernetes metadata only. 
channels.topic.template: "knative-channel-{{ .Namespace }}-{{ .Name }}" From 600c99abd3cb3a5def7a7f3867c680efbef602f5 Mon Sep 17 00:00:00 2001 From: Calum Murray Date: Tue, 11 Jul 2023 14:20:40 -0400 Subject: [PATCH 06/11] Update docs/eventing/brokers/broker-types/kafka-broker/README.md Co-authored-by: Leo Li --- docs/eventing/brokers/broker-types/kafka-broker/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/eventing/brokers/broker-types/kafka-broker/README.md b/docs/eventing/brokers/broker-types/kafka-broker/README.md index 6344689f7b..336a02790e 100644 --- a/docs/eventing/brokers/broker-types/kafka-broker/README.md +++ b/docs/eventing/brokers/broker-types/kafka-broker/README.md @@ -286,6 +286,7 @@ metadata: # The Go text/template used to generate consumergroup ID for triggers. # The template can reference the trigger Kubernetes metadata only. triggers.consumergroup.template: "knative-trigger-{{ .Namespace }}-{{ .Name }}" + # The Go text/template used to generate topics for Brokers. # The template can reference the broker Kubernetes metadata only. brokers.topic.template: "knative-broker-{{ .Namespace }}-{{ .Name }}" From ef97b636dc54197b2e8951ac1d4c686526b88654 Mon Sep 17 00:00:00 2001 From: Calum Murray Date: Wed, 12 Jul 2023 09:41:12 -0400 Subject: [PATCH 07/11] Update docs/eventing/brokers/broker-types/kafka-broker/README.md Co-authored-by: Pierangelo Di Pilato --- docs/eventing/brokers/broker-types/kafka-broker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/eventing/brokers/broker-types/kafka-broker/README.md b/docs/eventing/brokers/broker-types/kafka-broker/README.md index 336a02790e..3730908657 100644 --- a/docs/eventing/brokers/broker-types/kafka-broker/README.md +++ b/docs/eventing/brokers/broker-types/kafka-broker/README.md @@ -257,7 +257,7 @@ spec: !!! note When using an external topic, the Knative Kafka Broker does not own the topic and is not responsible for managing the topic. This includes the topic lifecycle or its general validity. Other restrictions for general access to the topic may apply. See the documentation about using [Access Control Lists (ACLs)](https://kafka.apache.org/documentation/#security_authz). -## Configure Kafka features +## Configure Knative Eventing Kafka features There are various kafka features/default values the Knative Kafka Broker uses when interacting with Kafka. 
You can configure these as follows: From 1b29c60893faf6a1f4ab007a454183025eac837f Mon Sep 17 00:00:00 2001 From: Calum Murray Date: Fri, 14 Jul 2023 15:33:04 -0400 Subject: [PATCH 08/11] Moved kafka features config info to new page --- config/nav.yml | 4 +- .../broker-types/kafka-broker/README.md | 172 +-------------- .../configuring-kafka-features.md | 206 ++++++++++++++++++ 3 files changed, 211 insertions(+), 171 deletions(-) create mode 100644 docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md diff --git a/config/nav.yml b/config/nav.yml index aa52e2c9ac..0661cba810 100644 --- a/config/nav.yml +++ b/config/nav.yml @@ -190,7 +190,9 @@ nav: - Available Broker types: eventing/brokers/broker-types/README.md # add default IMC broker page, page explaining broker types - Channel based Broker: eventing/brokers/broker-types/channel-based-broker/README.md - - Apache Kafka: eventing/brokers/broker-types/kafka-broker/README.md + - Apache Kafka: + - About Apache Kafka Broker: eventing/brokers/broker-types/kafka-broker/README.md + - Configuring Kafka features: eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md - RabbitMQ Broker: eventing/brokers/broker-types/rabbitmq-broker/README.md - Creating a Broker: eventing/brokers/create-broker.md - Developer configuration options: eventing/brokers/broker-developer-config-options.md diff --git a/docs/eventing/brokers/broker-types/kafka-broker/README.md b/docs/eventing/brokers/broker-types/kafka-broker/README.md index 3730908657..ab8d3d9d95 100644 --- a/docs/eventing/brokers/broker-types/kafka-broker/README.md +++ b/docs/eventing/brokers/broker-types/kafka-broker/README.md @@ -6,7 +6,7 @@ Notable features are: - Control plane High Availability - Horizontally scalable data plane -- [Extensively configurable](#kafka-producer-and-consumer-configurations) +- [Extensively configurable](./configuring-kafka-features) - Ordered delivery of events based on [CloudEvents partitioning extension](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/extensions/partitioning.md) - Support any Kafka version, see [compatibility matrix](https://cwiki.apache.org/confluence/display/KAFKA/Compatibility+Matrix) - Supports 2 [data plane modes](#data-plane-isolation-vs-shared-data-plane): data plane isolation per-namespace or shared data plane @@ -257,174 +257,6 @@ spec: !!! note When using an external topic, the Knative Kafka Broker does not own the topic and is not responsible for managing the topic. This includes the topic lifecycle or its general validity. Other restrictions for general access to the topic may apply. See the documentation about using [Access Control Lists (ACLs)](https://kafka.apache.org/documentation/#security_authz). -## Configure Knative Eventing Kafka features - -There are various kafka features/default values the Knative Kafka Broker uses when interacting with Kafka. You can configure these as follows: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: config-kafka-features - namespace: knative-eventing - data: - # Controls whether the dispatcher should use the rate limiter based on the number of virtual replicas. - # 1. Enabled: The rate limiter is applied. - # 2. Disabled: The rate limiter is not applied. - dispatcher.rate-limiter: "disabled" - - # Controls whether the dispatcher should record additional metrics. - # 1. Enabled: The metrics are recorded. - # 2. Disabled: The metrics are not recorded. 
- dispatcher.ordered-executor-metrics: "disabled" - - # Controls whether the controller should autoscale consumer resources with KEDA - # 1. Enabled: KEDA autoscaling of consumers will be setup. - # 2. Disabled: KEDA autoscaling of consumers will not be setup. - controller.autoscaler: "disabled"{% raw %} - - # The Go text/template used to generate consumergroup ID for triggers. - # The template can reference the trigger Kubernetes metadata only. - triggers.consumergroup.template: "knative-trigger-{{ .Namespace }}-{{ .Name }}" - - # The Go text/template used to generate topics for Brokers. - # The template can reference the broker Kubernetes metadata only. - brokers.topic.template: "knative-broker-{{ .Namespace }}-{{ .Name }}" - - # The Go text/template used to generate topics for Channels. - # The template can reference the channel Kubernetes metadata only. - channels.topic.template: "knative-channel-{{ .Namespace }}-{{ .Name }}" - {% endraw %} -``` - -## Consumer Offsets Commit Interval - -Kafka consumers keep track of the last successfully sent events by committing offsets. - -Knative Kafka Broker commits the offset every `auto.commit.interval.ms` milliseconds. - -!!! note - To prevent negative impacts to performance, it is not recommended committing - offsets every time an event is successfully sent to a subscriber. - -The interval can be changed by changing the `config-kafka-broker-data-plane` `ConfigMap` -in the `knative-eventing` namespace by modifying the parameter `auto.commit.interval.ms` as follows: - -```yaml - -apiVersion: v1 -kind: ConfigMap -metadata: - name: config-kafka-broker-data-plane - namespace: knative-eventing -data: - # Some configurations omitted ... - config-kafka-broker-consumer.properties: | - # Some configurations omitted ... - - # Commit the offset every 5000 millisecods (5 seconds) - auto.commit.interval.ms=5000 -``` - -!!! note - Knative Kafka Broker guarantees at least once delivery, which means that your applications may - receive duplicate events. A higher commit interval means that there is a higher probability of - receiving duplicate events, because when a Consumer restarts, it restarts from the last - committed offset. - -## Kafka Producer and Consumer configurations - -Knative exposes all available Kafka producer and consumer configurations that can be modified to suit your workloads. - -You can change these configurations by modifying the `config-kafka-broker-data-plane` `ConfigMap` in -the `knative-eventing` namespace. - -Documentation for the settings available in this `ConfigMap` is available on the -[Apache Kafka website](https://kafka.apache.org/documentation/), -in particular, [Producer configurations](https://kafka.apache.org/documentation/#producerconfigs) -and [Consumer configurations](https://kafka.apache.org/documentation/#consumerconfigs). - -## Enable debug logging for data plane components - -The following YAML shows the default logging configuration for data plane components, that is created during the -installation step: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: kafka-config-logging - namespace: knative-eventing -data: - config.xml: | - - - - - - - - -``` - -To change the logging level to `DEBUG`, you must: - -1. 
Apply the following `kafka-config-logging` `ConfigMap` or replace `level="INFO"` with `level="DEBUG"` to the -`ConfigMap` `kafka-config-logging`: - - ```yaml - apiVersion: v1 - kind: ConfigMap - metadata: - name: kafka-config-logging - namespace: knative-eventing - data: - config.xml: | - - - - - - - - - ``` - -2. Restart the `kafka-broker-receiver` and the `kafka-broker-dispatcher`, by entering the following commands: - - ```bash - kubectl rollout restart deployment -n knative-eventing kafka-broker-receiver - kubectl rollout restart deployment -n knative-eventing kafka-broker-dispatcher - ``` - -## Configuring the order of delivered events - -When dispatching events, the Kafka broker can be configured to support different delivery ordering guarantees. - -You can configure the delivery order of events using the `kafka.eventing.knative.dev/delivery.order` annotation on the `Trigger` object: - -```yaml -apiVersion: eventing.knative.dev/v1 -kind: Trigger -metadata: - name: my-service-trigger - annotations: - kafka.eventing.knative.dev/delivery.order: ordered -spec: - broker: my-kafka-broker - subscriber: - ref: - apiVersion: serving.knative.dev/v1 - kind: Service - name: my-service -``` - -The supported consumer delivery guarantees are: - -* `unordered`: An unordered consumer is a non-blocking consumer that delivers messages unordered, while preserving proper offset management. Useful when there is a high demand of parallel consumption and no need for explicit ordering. One example could be processing of click analytics. -* `ordered`: An ordered consumer is a per-partition blocking consumer that waits for a successful response from the CloudEvent subscriber before it delivers the next message of the partition. Useful when there is a need for more strict ordering or if there is a relationship or grouping between events. One example could be processing of customer orders. - -The `unordered` delivery is the default ordering guarantee. ## Data plane Isolation vs Shared Data plane @@ -480,7 +312,7 @@ Upon the creation of the first `Broker` with `KafkaNamespaced` class, the `kafka All the configuration mechanisms that are available for the `Kafka` Broker class are also available for the brokers with `KafkaNamespaced` class with these exceptions: -* [Above](#kafka-producer-and-consumer-configurations) it is described how producer and consumer configurations is done by modifying the `config-kafka-broker-data-plane` configmap in the `knative-eventing` namespace. Since Kafka Broker controller propagates this configmap into the user namespace, currently there is no way to configure producer and consumer configurations per namespace. Any value set in the `config-kafka-broker-data-plane` `ConfigMap` in the `knative-eventing` namespace will be also used in the user namespace. +* [This page](./configuring-kafka-features) describes how producer and consumer configurations is done by modifying the `config-kafka-broker-data-plane` configmap in the `knative-eventing` namespace. Since Kafka Broker controller propagates this configmap into the user namespace, currently there is no way to configure producer and consumer configurations per namespace. Any value set in the `config-kafka-broker-data-plane` `ConfigMap` in the `knative-eventing` namespace will be also used in the user namespace. * Because of the same propagation, it is also not possible to configure consumer offsets commit interval per namespace. * A few more configmaps are propagated: `config-tracing` and `kafka-config-logging`. 
This means, tracing and logging are also not configurable per namespace. * Similarly, the data plane deployments are propagated from the `knative-eventing` namespace to the user namespace. This means that the data plane deployments are not configurable per namespace and will be identical to the ones in the `knative-eventing` namespace. diff --git a/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md b/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md new file mode 100644 index 0000000000..c12ca46f08 --- /dev/null +++ b/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md @@ -0,0 +1,206 @@ +# Configuring Kafka Features + +There are many different configuration options for how Knative Eventing and the Kafka Broker interact with Apache Kafka. + +## Configure Knative Eventing Kafka features + +There are various kafka features/default values the Knative Kafka Broker uses when interacting with Kafka. + +### Dispatcher rate limiter + +By default, there will be no rate limiting applied as events are dispatched from the data-plane. However, if you would like to enable rate limiting based on the number of +virtual replicas, you can set `dispatcher.rate-limiter: "enabled"` in the config map below. + +### Dispatcher ordered execution metrics + +By default, only a subset of availabe metrics are collected from the data-plane event dispatcher. If you would like to enable additional metrics to track the ordered execution, +you can set `dispatcher.ordered-executor-metrics: "enabled"` in the config map below. + +### Controller autoscaler + +By default, the controller will not autoscale the resources being used by the Kafka consumers within the data-plane. If you would like to enable autoscaling of these +resources with [KEDA](https://keda.sh/), then you can set `controller.autoscaler: "enabled"` in the config map below. + +### Consumer Group ID + +By default, the consumer group ID of each Kafka consumer created in the data-plane from a trigger will be made from the template `{% raw %}"knative-trigger-{{ .Namespace }}-{{ .Name }}{% endraw %}`. +If you want to assign consumer group IDs to your triggers in a different way, you can set use any valid [go text/template](https://pkg.go.dev/text/template) text template +to generate the consumer group IDs by setting `triggers.consumergroup.template: "your-text-template-here"` in the config map below. + +### Broker topic template + +By default, the topic created by a Kafka Broker is named from the template `{% raw %}knative-broker-{{ .Namespace }}-{{ .Name }}{% endraw %}`. If you want to change the template +used to name Broker topics, you can set `brokers.topic.template: "your-text-template-here"`, where `"your-text-template-here"` is any valid [go text/template](https://pkg.go.dev/text/template) +in the config map below. + +## Channel topic template + +By default, the topic created by a Kafka Channel is named from the template `{% raw %}messaging-kafka.{{ .Namespace }}.{{ .Name }}"`. If you want to change the template +used to name Channel topics, you can set `channels.topic.template: "your-text-template-here"`, where `"your-text-template-here"` is any valid [go text/template](https://pkg.go.dev/text/template) +in the config map below. + + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-kafka-features + namespace: knative-eventing + data: + # Controls whether the dispatcher should use the rate limiter based on the number of virtual replicas. + # 1. 
Enabled: The rate limiter is applied. + # 2. Disabled: The rate limiter is not applied. + dispatcher.rate-limiter: "disabled" + + # Controls whether the dispatcher should record additional metrics. + # 1. Enabled: The metrics are recorded. + # 2. Disabled: The metrics are not recorded. + dispatcher.ordered-executor-metrics: "disabled" + + # Controls whether the controller should autoscale consumer resources with KEDA + # 1. Enabled: KEDA autoscaling of consumers will be setup. + # 2. Disabled: KEDA autoscaling of consumers will not be setup. + controller.autoscaler: "disabled"{% raw %} + + # The Go text/template used to generate consumergroup ID for triggers. + # The template can reference the trigger Kubernetes metadata only. + triggers.consumergroup.template: "knative-trigger-{{ .Namespace }}-{{ .Name }}" + + # The Go text/template used to generate topics for Brokers. + # The template can reference the broker Kubernetes metadata only. + brokers.topic.template: "knative-broker-{{ .Namespace }}-{{ .Name }}" + + # The Go text/template used to generate topics for Channels. + # The template can reference the channel Kubernetes metadata only. + channels.topic.template: "messaging-kafka.{{ .Namespace }}.{{ .Name }}" + {% endraw %} +``` + +## Consumer Offsets Commit Interval + +Kafka consumers keep track of the last successfully sent events by committing offsets. + +Knative Kafka Broker commits the offset every `auto.commit.interval.ms` milliseconds. + +!!! note + To prevent negative impacts to performance, it is not recommended committing + offsets every time an event is successfully sent to a subscriber. + +The interval can be changed by changing the `config-kafka-broker-data-plane` `ConfigMap` +in the `knative-eventing` namespace by modifying the parameter `auto.commit.interval.ms` as follows: + +```yaml + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-kafka-broker-data-plane + namespace: knative-eventing +data: + # Some configurations omitted ... + config-kafka-broker-consumer.properties: | + # Some configurations omitted ... + + # Commit the offset every 5000 millisecods (5 seconds) + auto.commit.interval.ms=5000 +``` + +!!! note + Knative Kafka Broker guarantees at least once delivery, which means that your applications may + receive duplicate events. A higher commit interval means that there is a higher probability of + receiving duplicate events, because when a Consumer restarts, it restarts from the last + committed offset. + +## Kafka Producer and Consumer configurations + +Knative exposes all available Kafka producer and consumer configurations that can be modified to suit your workloads. + +You can change these configurations by modifying the `config-kafka-broker-data-plane` `ConfigMap` in +the `knative-eventing` namespace. + +Documentation for the settings available in this `ConfigMap` is available on the +[Apache Kafka website](https://kafka.apache.org/documentation/), +in particular, [Producer configurations](https://kafka.apache.org/documentation/#producerconfigs) +and [Consumer configurations](https://kafka.apache.org/documentation/#consumerconfigs). + +## Enable debug logging for data plane components + +The following YAML shows the default logging configuration for data plane components, that is created during the +installation step: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-config-logging + namespace: knative-eventing +data: + config.xml: | + + + + + + + + +``` + +To change the logging level to `DEBUG`, you must: + +1. 
Apply the following `kafka-config-logging` `ConfigMap` or replace `level="INFO"` with `level="DEBUG"` to the
+`ConfigMap` `kafka-config-logging`:
+
+    ```yaml
+    apiVersion: v1
+    kind: ConfigMap
+    metadata:
+      name: kafka-config-logging
+      namespace: knative-eventing
+    data:
+      config.xml: |
+
+
+
+
+
+
+
+
+    ```
+
+2. Restart the `kafka-broker-receiver` and the `kafka-broker-dispatcher`, by entering the following commands:
+
+    ```bash
+    kubectl rollout restart deployment -n knative-eventing kafka-broker-receiver
+    kubectl rollout restart deployment -n knative-eventing kafka-broker-dispatcher
+    ```
+
+## Configuring the order of delivered events
+
+When dispatching events, the Kafka broker can be configured to support different delivery ordering guarantees.
+
+You can configure the delivery order of events using the `kafka.eventing.knative.dev/delivery.order` annotation on the `Trigger` object:
+
+```yaml
+apiVersion: eventing.knative.dev/v1
+kind: Trigger
+metadata:
+  name: my-service-trigger
+  annotations:
+    kafka.eventing.knative.dev/delivery.order: ordered
+spec:
+  broker: my-kafka-broker
+  subscriber:
+    ref:
+      apiVersion: serving.knative.dev/v1
+      kind: Service
+      name: my-service
+```
+
+The supported consumer delivery guarantees are:
+
+* `unordered`: An unordered consumer is a non-blocking consumer that delivers messages unordered, while preserving proper offset management. Useful when there is a high demand of parallel consumption and no need for explicit ordering. One example could be processing of click analytics.
+* `ordered`: An ordered consumer is a per-partition blocking consumer that waits for a successful response from the CloudEvent subscriber before it delivers the next message of the partition. Useful when there is a need for more strict ordering or if there is a relationship or grouping between events. One example could be processing of customer orders.
+
+The `unordered` delivery is the default ordering guarantee.

From 9e1b169e678896c34c25872baba5f9ef6871bd77 Mon Sep 17 00:00:00 2001
From: Calum Murray
Date: Wed, 23 Aug 2023 13:38:50 -0400
Subject: [PATCH 09/11] Apply suggestions from code review

Co-authored-by: Pierangelo Di Pilato
---
 .../kafka-broker/configuring-kafka-features.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md b/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md
index c12ca46f08..dff0162270 100644
--- a/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md
+++ b/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md
@@ -1,6 +1,6 @@
 # Configuring Kafka Features
 
-There are many different configuration options for how Knative Eventing and the Kafka Broker interact with Apache Kafka.
+There are many different configuration options for how Knative Eventing and the Knative Broker for Apache Kafka interact with the Apache Kafka clusters.
 
 ## Configure Knative Eventing Kafka features
 
@@ -21,19 +21,19 @@ you can set `dispatcher.ordered-executor-metrics: "enabled"` in the config map b
 
 ### Controller autoscaler
 
 By default, the controller will not autoscale the resources being used by the Kafka consumers within the data-plane. If you would like to enable autoscaling of these
 resources with [KEDA](https://keda.sh/), then you can set `controller.autoscaler: "enabled"` in the config map below.
 
-### Consumer Group ID
+### Consumer Group ID for Triggers
 
 By default, the consumer group ID of each Kafka consumer created in the data-plane from a trigger will be made from the template `{% raw %}"knative-trigger-{{ .Namespace }}-{{ .Name }}{% endraw %}`.
 If you want to assign consumer group IDs to your triggers in a different way, you can set use any valid [go text/template](https://pkg.go.dev/text/template) text template
 to generate the consumer group IDs by setting `triggers.consumergroup.template: "your-text-template-here"` in the config map below.
 
-### Broker topic template
+### Broker topic name template
 
 By default, the topic created by a Kafka Broker is named from the template `{% raw %}knative-broker-{{ .Namespace }}-{{ .Name }}{% endraw %}`. If you want to change the template
 used to name Broker topics, you can set `brokers.topic.template: "your-text-template-here"`, where `"your-text-template-here"` is any valid [go text/template](https://pkg.go.dev/text/template)
 in the config map below.
 
-## Channel topic template
+## Channel topic name template
 
 By default, the topic created by a Kafka Channel is named from the template `{% raw %}messaging-kafka.{{ .Namespace }}.{{ .Name }}"`. If you want to change the template
 used to name Channel topics, you can set `channels.topic.template: "your-text-template-here"`, where `"your-text-template-here"` is any valid [go text/template](https://pkg.go.dev/text/template)
 in the config map below.

From 1cbf66678455e356b971b39e5a7cd1ca88706819 Mon Sep 17 00:00:00 2001
From: Calum Murray
Date: Wed, 20 Sep 2023 14:47:15 -0400
Subject: [PATCH 10/11] Follow example from serving for documenting configmap keys

Signed-off-by: Calum Murray
---
 .../broker-types/kafka-broker/README.md | 128 +++++++++++
 .../configuring-kafka-features.md       | 216 ++++----------------
 2 files changed, 168 insertions(+), 176 deletions(-)

diff --git a/docs/eventing/brokers/broker-types/kafka-broker/README.md b/docs/eventing/brokers/broker-types/kafka-broker/README.md
index ab8d3d9d95..705f5fe3cf 100644
--- a/docs/eventing/brokers/broker-types/kafka-broker/README.md
+++ b/docs/eventing/brokers/broker-types/kafka-broker/README.md
@@ -257,6 +257,134 @@ spec:
 !!! note
     When using an external topic, the Knative Kafka Broker does not own the topic and is not responsible for managing the topic. This includes the topic lifecycle or its general validity. Other restrictions for general access to the topic may apply. See the documentation about using [Access Control Lists (ACLs)](https://kafka.apache.org/documentation/#security_authz).
 
+## Consumer Offsets Commit Interval
+
+Kafka consumers keep track of the last successfully sent events by committing offsets.
+
+Knative Kafka Broker commits the offset every `auto.commit.interval.ms` milliseconds.
+
+!!! note
+    To prevent negative impacts to performance, it is not recommended committing
+    offsets every time an event is successfully sent to a subscriber.
+
+The interval can be changed by changing the `config-kafka-broker-data-plane` `ConfigMap`
+in the `knative-eventing` namespace by modifying the parameter `auto.commit.interval.ms` as follows:
+
+```yaml
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: config-kafka-broker-data-plane
+  namespace: knative-eventing
+data:
+  # Some configurations omitted ...
+  config-kafka-broker-consumer.properties: |
+    # Some configurations omitted ...
+
+    # Commit the offset every 5000 milliseconds (5 seconds)
+    auto.commit.interval.ms=5000
+```
+
+!!!
note + Knative Kafka Broker guarantees at least once delivery, which means that your applications may + receive duplicate events. A higher commit interval means that there is a higher probability of + receiving duplicate events, because when a Consumer restarts, it restarts from the last + committed offset. + +## Kafka Producer and Consumer configurations + +Knative exposes all available Kafka producer and consumer configurations that can be modified to suit your workloads. + +You can change these configurations by modifying the `config-kafka-broker-data-plane` `ConfigMap` in +the `knative-eventing` namespace. + +Documentation for the settings available in this `ConfigMap` is available on the +[Apache Kafka website](https://kafka.apache.org/documentation/), +in particular, [Producer configurations](https://kafka.apache.org/documentation/#producerconfigs) +and [Consumer configurations](https://kafka.apache.org/documentation/#consumerconfigs). + +## Enable debug logging for data plane components + +The following YAML shows the default logging configuration for data plane components, that is created during the +installation step: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-config-logging + namespace: knative-eventing +data: + config.xml: | + + + + + + + + +``` + +To change the logging level to `DEBUG`, you must: + +1. Apply the following `kafka-config-logging` `ConfigMap` or replace `level="INFO"` with `level="DEBUG"` to the +`ConfigMap` `kafka-config-logging`: + + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + name: kafka-config-logging + namespace: knative-eventing + data: + config.xml: | + + + + + + + + + ``` + +2. Restart the `kafka-broker-receiver` and the `kafka-broker-dispatcher`, by entering the following commands: + + ```bash + kubectl rollout restart deployment -n knative-eventing kafka-broker-receiver + kubectl rollout restart deployment -n knative-eventing kafka-broker-dispatcher + ``` + +## Configuring the order of delivered events + +When dispatching events, the Kafka broker can be configured to support different delivery ordering guarantees. + +You can configure the delivery order of events using the `kafka.eventing.knative.dev/delivery.order` annotation on the `Trigger` object: + +```yaml +apiVersion: eventing.knative.dev/v1 +kind: Trigger +metadata: + name: my-service-trigger + annotations: + kafka.eventing.knative.dev/delivery.order: ordered +spec: + broker: my-kafka-broker + subscriber: + ref: + apiVersion: serving.knative.dev/v1 + kind: Service + name: my-service +``` + +The supported consumer delivery guarantees are: + +* `unordered`: An unordered consumer is a non-blocking consumer that delivers messages unordered, while preserving proper offset management. Useful when there is a high demand of parallel consumption and no need for explicit ordering. One example could be processing of click analytics. +* `ordered`: An ordered consumer is a per-partition blocking consumer that waits for a successful response from the CloudEvent subscriber before it delivers the next message of the partition. Useful when there is a need for more strict ordering or if there is a relationship or grouping between events. One example could be processing of customer orders. + +The `unordered` delivery is the default ordering guarantee. 
## Data plane Isolation vs Shared Data plane diff --git a/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md b/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md index dff0162270..ad991e1d82 100644 --- a/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md +++ b/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md @@ -6,201 +6,65 @@ There are many different configuration options for how Knative Eventing and the There are various kafka features/default values the Knative Kafka Broker uses when interacting with Kafka. -### Dispatcher rate limiter - -By default, there will be no rate limiting applied as events are dispatched from the data-plane. However, if you would like to enable rate limiting based on the number of -virtual replicas, you can set `dispatcher.rate-limiter: "enabled"` in the config map below. - -### Dispatcher ordered execution metrics - -By default, only a subset of availabe metrics are collected from the data-plane event dispatcher. If you would like to enable additional metrics to track the ordered execution, -you can set `dispatcher.ordered-executor-metrics: "enabled"` in the config map below. +### Consumer Group ID for Triggers -### Controller autoscaler +The `triggers.consumergroup.template` value determines the template used to generate the consumer group ID used by your triggers. -By default, the controller will not autoscale the resources being used by the Kafka consumers within the data-plane. If you would like to enable autoscaling of these -resources with [KEDA](https://keda.sh/), then you can set `controller.autoscaler: "enabled"` in the config map below. +* **Global key:** `triggers.consumergroup.template` +* **Possible values:**: Any valid [go text/template](https://pkg.go.dev/text/template) +* **Default:** `{% raw %}knative-trigger-{{ .Namespace }}-{{ .Name }}{% endraw %}` -### Consumer Group ID for Triggers +**Example:** -By default, the consumer group ID of each Kafka consumer created in the data-plane from a trigger will be made from the template `{% raw %}"knative-trigger-{{ .Namespace }}-{{ .Name }}{% endraw %}`. -If you want to assign consumer group IDs to your triggers in a different way, you can set use any valid [go text/template](https://pkg.go.dev/text/template) text template -to generate the consumer group IDs by setting `triggers.consumergroup.template: "your-text-template-here"` in the config map below. +=== "Global (ConfigMap)" + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + name: config-kafka-features + namespace: knative-eventing + data: + triggers.consumergroup.template: {% raw %}"knative-trigger-{{ .Namespace }}-{{ .Name }}"{% endraw %} + ``` ### Broker topic name template -By default, the topic created by a Kafka Broker is named from the template `{% raw %}knative-broker-{{ .Namespace }}-{{ .Name }}{% endraw %}`. If you want to change the template -used to name Broker topics, you can set `brokers.topic.template: "your-text-template-here"`, where `"your-text-template-here"` is any valid [go text/template](https://pkg.go.dev/text/template) -in the config map below. +The `brokers.topic.template` values determines the template used to generate the Kafka topic names used by your brokers. -## Channel topic name template - -By default, the topic created by a Kafka Channel is named from the template `{% raw %}messaging-kafka.{{ .Namespace }}.{{ .Name }}"`. 
If you want to change the template -used to name Channel topics, you can set `channels.topic.template: "your-text-template-here"`, where `"your-text-template-here"` is any valid [go text/template](https://pkg.go.dev/text/template) -in the config map below. +* **Global Key:** `brokers.topic.template` +* **Possible values:** Any valid [go text/template](https://pkg.go.dev/text/template) +* **Default:** `{% raw %}knative-broker-{{ .Namespace }}-{{ .Name }}{% endraw %}` +**Example:** -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: config-kafka-features - namespace: knative-eventing - data: - # Controls whether the dispatcher should use the rate limiter based on the number of virtual replicas. - # 1. Enabled: The rate limiter is applied. - # 2. Disabled: The rate limiter is not applied. - dispatcher.rate-limiter: "disabled" - - # Controls whether the dispatcher should record additional metrics. - # 1. Enabled: The metrics are recorded. - # 2. Disabled: The metrics are not recorded. - dispatcher.ordered-executor-metrics: "disabled" - - # Controls whether the controller should autoscale consumer resources with KEDA - # 1. Enabled: KEDA autoscaling of consumers will be setup. - # 2. Disabled: KEDA autoscaling of consumers will not be setup. - controller.autoscaler: "disabled"{% raw %} - - # The Go text/template used to generate consumergroup ID for triggers. - # The template can reference the trigger Kubernetes metadata only. - triggers.consumergroup.template: "knative-trigger-{{ .Namespace }}-{{ .Name }}" - - # The Go text/template used to generate topics for Brokers. - # The template can reference the broker Kubernetes metadata only. - brokers.topic.template: "knative-broker-{{ .Namespace }}-{{ .Name }}" - - # The Go text/template used to generate topics for Channels. - # The template can reference the channel Kubernetes metadata only. - channels.topic.template: "messaging-kafka.{{ .Namespace }}.{{ .Name }}" - {% endraw %} -``` - -## Consumer Offsets Commit Interval - -Kafka consumers keep track of the last successfully sent events by committing offsets. - -Knative Kafka Broker commits the offset every `auto.commit.interval.ms` milliseconds. - -!!! note - To prevent negative impacts to performance, it is not recommended committing - offsets every time an event is successfully sent to a subscriber. - -The interval can be changed by changing the `config-kafka-broker-data-plane` `ConfigMap` -in the `knative-eventing` namespace by modifying the parameter `auto.commit.interval.ms` as follows: - -```yaml - -apiVersion: v1 -kind: ConfigMap -metadata: - name: config-kafka-broker-data-plane - namespace: knative-eventing -data: - # Some configurations omitted ... - config-kafka-broker-consumer.properties: | - # Some configurations omitted ... - - # Commit the offset every 5000 millisecods (5 seconds) - auto.commit.interval.ms=5000 -``` - -!!! note - Knative Kafka Broker guarantees at least once delivery, which means that your applications may - receive duplicate events. A higher commit interval means that there is a higher probability of - receiving duplicate events, because when a Consumer restarts, it restarts from the last - committed offset. - -## Kafka Producer and Consumer configurations - -Knative exposes all available Kafka producer and consumer configurations that can be modified to suit your workloads. - -You can change these configurations by modifying the `config-kafka-broker-data-plane` `ConfigMap` in -the `knative-eventing` namespace. 
- -Documentation for the settings available in this `ConfigMap` is available on the -[Apache Kafka website](https://kafka.apache.org/documentation/), -in particular, [Producer configurations](https://kafka.apache.org/documentation/#producerconfigs) -and [Consumer configurations](https://kafka.apache.org/documentation/#consumerconfigs). - -## Enable debug logging for data plane components - -The following YAML shows the default logging configuration for data plane components, that is created during the -installation step: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: kafka-config-logging - namespace: knative-eventing -data: - config.xml: | - - - - - - - - -``` - -To change the logging level to `DEBUG`, you must: - -1. Apply the following `kafka-config-logging` `ConfigMap` or replace `level="INFO"` with `level="DEBUG"` to the -`ConfigMap` `kafka-config-logging`: - +=== "Global (ConfigMap)" ```yaml apiVersion: v1 kind: ConfigMap metadata: - name: kafka-config-logging + name: config-kafka-features namespace: knative-eventing data: - config.xml: | - - - - - - - - - ``` - -2. Restart the `kafka-broker-receiver` and the `kafka-broker-dispatcher`, by entering the following commands: - - ```bash - kubectl rollout restart deployment -n knative-eventing kafka-broker-receiver - kubectl rollout restart deployment -n knative-eventing kafka-broker-dispatcher + brokers.topic.template: {% raw %}"knative-broker-{{ .Namespace }}-{{ .Name }}"(% endraw %} ``` -## Configuring the order of delivered events - -When dispatching events, the Kafka broker can be configured to support different delivery ordering guarantees. - -You can configure the delivery order of events using the `kafka.eventing.knative.dev/delivery.order` annotation on the `Trigger` object: +## Channel topic name template -```yaml -apiVersion: eventing.knative.dev/v1 -kind: Trigger -metadata: - name: my-service-trigger - annotations: - kafka.eventing.knative.dev/delivery.order: ordered -spec: - broker: my-kafka-broker - subscriber: - ref: - apiVersion: serving.knative.dev/v1 - kind: Service - name: my-service -``` +The `channels.topic.template` value determines the template used to generate the kafka topic names used by your channels. -The supported consumer delivery guarantees are: +* **Global Key:** `channels.topic.template` +* **Possible values:** Any valid [go text/template](https://pkg.go.dev/text/template) +* **Default:** `{% raw %}messaging-kafka.{{ .Namespace }}.{{ .Name }}{% endraw %}` -* `unordered`: An unordered consumer is a non-blocking consumer that delivers messages unordered, while preserving proper offset management. Useful when there is a high demand of parallel consumption and no need for explicit ordering. One example could be processing of click analytics. -* `ordered`: An ordered consumer is a per-partition blocking consumer that waits for a successful response from the CloudEvent subscriber before it delivers the next message of the partition. Useful when there is a need for more strict ordering or if there is a relationship or grouping between events. One example could be processing of customer orders. +**Example:** -The `unordered` delivery is the default ordering guarantee. 
+=== "Global (ConfigMap)" + ```yaml + apiVersion: v1 + kind: ConfigMap + metadata: + name: config-kafka-features + namespace: knative-eventing + data: + channels.topic.template: {% raw %}"messaging-kafka.{{ .Namespace }}.{{ .Name }}"(% endraw %} + ``` From 9dac7ea18f26547632341d14327727e7d326e6dd Mon Sep 17 00:00:00 2001 From: Calum Murray Date: Thu, 5 Oct 2023 10:58:06 -0400 Subject: [PATCH 11/11] Fix macro Signed-off-by: Calum Murray --- .../broker-types/kafka-broker/configuring-kafka-features.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md b/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md index ad991e1d82..ed67487b47 100644 --- a/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md +++ b/docs/eventing/brokers/broker-types/kafka-broker/configuring-kafka-features.md @@ -45,7 +45,7 @@ The `brokers.topic.template` values determines the template used to generate the name: config-kafka-features namespace: knative-eventing data: - brokers.topic.template: {% raw %}"knative-broker-{{ .Namespace }}-{{ .Name }}"(% endraw %} + brokers.topic.template: {% raw %}"knative-broker-{{ .Namespace }}-{{ .Name }}"{% endraw %} ``` ## Channel topic name template @@ -66,5 +66,5 @@ The `channels.topic.template` value determines the template used to generate the name: config-kafka-features namespace: knative-eventing data: - channels.topic.template: {% raw %}"messaging-kafka.{{ .Namespace }}.{{ .Name }}"(% endraw %} + channels.topic.template: {% raw %}"messaging-kafka.{{ .Namespace }}.{{ .Name }}"{% endraw %} ```
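The `*.template` keys documented in this series (`triggers.consumergroup.template`, `brokers.topic.template`, `channels.topic.template`) are plain [Go text/template](https://pkg.go.dev/text/template) strings rendered against the resource's Kubernetes metadata. The following standalone sketch only previews how such a template expands; it is not the controller's rendering code, and the struct fields and sample resource names are assumptions for illustration:

```go
package main

import (
	"fmt"
	"os"
	"strings"
	"text/template"
)

// metadata holds the two fields the default templates reference
// ({{ .Namespace }} and {{ .Name }}). The real controller passes the
// resource's Kubernetes metadata; this struct is only a stand-in.
type metadata struct {
	Namespace string
	Name      string
}

func main() {
	// Default values from the config-kafka-features ConfigMap.
	templates := []struct{ key, tmpl string }{
		{"triggers.consumergroup.template", "knative-trigger-{{ .Namespace }}-{{ .Name }}"},
		{"brokers.topic.template", "knative-broker-{{ .Namespace }}-{{ .Name }}"},
		{"channels.topic.template", "messaging-kafka.{{ .Namespace }}.{{ .Name }}"},
	}

	// Hypothetical resource used only for this preview.
	md := metadata{Namespace: "default", Name: "my-broker"}

	for _, e := range templates {
		t, err := template.New(e.key).Parse(e.tmpl)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		var out strings.Builder
		if err := t.Execute(&out, md); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		fmt.Printf("%s => %s\n", e.key, out.String())
	}
}
```

With the defaults shown above, a Broker named `my-broker` in the `default` namespace would, for example, resolve to the topic `knative-broker-default-my-broker`, matching the naming described in the ConfigMap comments.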