* Added support for consumer assignment strategies that do not trigger "stop the world" rebalances (e.g., cooperative sticky). Fixes issues Farfetch#557 and Farfetch#456.

* Enabled automatic committing (Confluent's `enable.auto.commit = true`) instead of relying solely on manual commits, but only when the consumer strategy is cooperative sticky (see the open librdkafka issue confluentinc/librdkafka#4059).
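
For reference, a minimal sketch of how a consumer could opt into the cooperative sticky assignor through KafkaFlow's fluent configuration. Broker address, topic, and group id are placeholders, and the `WithConsumerConfig` overload on the consumer builder is assumed to be available:

using Confluent.Kafka;
using KafkaFlow;
using Microsoft.Extensions.DependencyInjection;

var services = new ServiceCollection();

services.AddKafka(kafka => kafka
    .AddCluster(cluster => cluster
        .WithBrokers(new[] { "localhost:9092" })
        .AddConsumer(consumer => consumer
            .Topic("sample-topic")
            .WithGroupId("sample-group")
            .WithBufferSize(100)
            .WithWorkersCount(10)
            // Cooperative sticky is not a "stop the world" strategy, so with this
            // commit the builder enables auto commit for it (offsets are still
            // stored explicitly because EnableAutoOffsetStore stays false).
            .WithConsumerConfig(new ConsumerConfig
            {
                PartitionAssignmentStrategy = PartitionAssignmentStrategy.CooperativeSticky,
            }))));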
golanbz committed Sep 2, 2024
1 parent e121a5b commit 7b6f0ee
Showing 4 changed files with 112 additions and 20 deletions.
src/KafkaFlow/Configuration/ConsumerConfigurationBuilder.cs (3 additions & 1 deletion)
@@ -4,6 +4,7 @@
using System.Linq;
using System.Threading.Tasks;
using KafkaFlow.Consumers.DistributionStrategies;
using KafkaFlow.Extensions;

namespace KafkaFlow.Configuration;

@@ -251,7 +252,8 @@ public IConsumerConfiguration Build(ClusterConfiguration clusterConfiguration)
consumerConfigCopy.StatisticsIntervalMs = _consumerConfig.StatisticsIntervalMs ?? _statisticsInterval;

consumerConfigCopy.EnableAutoOffsetStore = false;
consumerConfigCopy.EnableAutoCommit = false;
consumerConfigCopy.EnableAutoCommit = _consumerConfig.PartitionAssignmentStrategy.IsStopTheWorldStrategy() is false;
consumerConfigCopy.AutoCommitIntervalMs = _consumerConfig.AutoCommitIntervalMs ?? 5000;

consumerConfigCopy.ReadSecurityInformationFrom(clusterConfiguration);

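To make the effect of the builder change concrete, here is a sketch of the effective Confluent.Kafka settings it now produces for an eager assignor versus cooperative sticky, with 5000 ms being the fallback auto-commit interval shown in the hunk above:

using Confluent.Kafka;

// Eager assignors (Range, RoundRobin, or no strategy set): commits stay fully manual.
var eager = new ConsumerConfig
{
    PartitionAssignmentStrategy = PartitionAssignmentStrategy.Range,
    EnableAutoOffsetStore = false,
    EnableAutoCommit = false,
};

// Cooperative sticky: offsets are stored manually and flushed by the auto-committer.
var cooperativeSticky = new ConsumerConfig
{
    PartitionAssignmentStrategy = PartitionAssignmentStrategy.CooperativeSticky,
    EnableAutoOffsetStore = false,
    EnableAutoCommit = true,
    AutoCommitIntervalMs = 5000,
};
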
src/KafkaFlow/Consumers/Consumer.cs (56 additions & 14 deletions)
@@ -7,13 +7,15 @@
using Confluent.Kafka;
using KafkaFlow.Authentication;
using KafkaFlow.Configuration;
using KafkaFlow.Extensions;

namespace KafkaFlow.Consumers;

internal class Consumer : IConsumer
{
private readonly IDependencyResolver _dependencyResolver;
private readonly ILogHandler _logHandler;
private readonly bool _stopTheWorldStrategy;

private readonly List<Action<IDependencyResolver, IConsumer<byte[], byte[]>, List<TopicPartition>>>
_partitionsAssignedHandlers = new();
@@ -40,6 +42,7 @@ public Consumer(
this.Configuration = configuration;
_flowManager = new ConsumerFlowManager(this, _logHandler);
_maxPollIntervalExceeded = new(_logHandler);
_stopTheWorldStrategy = Configuration.GetKafkaConfig().PartitionAssignmentStrategy.IsStopTheWorldStrategy();

foreach (var handler in this.Configuration.StatisticsHandlers)
{
@@ -148,7 +151,17 @@ public void Commit(IReadOnlyCollection<Confluent.Kafka.TopicPartitionOffset> off
return;
}

_consumer.Commit(validOffsets);
if (_stopTheWorldStrategy)
{
_consumer.Commit(validOffsets);
}
else
{
foreach (var topicPartitionOffset in validOffsets)
{
_consumer.StoreOffset(topicPartitionOffset);
}
}

foreach (var offset in validOffsets)
{
@@ -237,17 +250,8 @@ private void EnsureConsumer()
var kafkaConfig = this.Configuration.GetKafkaConfig();

var consumerBuilder = new ConsumerBuilder<byte[], byte[]>(kafkaConfig)
.SetPartitionsAssignedHandler(
(consumer, partitions) => this.FirePartitionsAssignedHandlers(consumer, partitions))
.SetPartitionsRevokedHandler(
(consumer, partitions) =>
{
_partitionsRevokedHandlers.ForEach(handler => handler(_dependencyResolver, consumer, partitions));
this.Assignment = new List<TopicPartition>();
this.Subscription = new List<string>();
_currentPartitionsOffsets.Clear();
_flowManager.Stop();
})
.SetPartitionsAssignedHandler(FirePartitionsAssignedHandlers)
.SetPartitionsRevokedHandler(FirePartitionRevokedHandlers)
.SetErrorHandler((consumer, error) => _errorsHandlers.ForEach(x => x(consumer, error)))
.SetStatisticsHandler((consumer, statistics) => _statisticsHandlers.ForEach(x => x(consumer, statistics)));

@@ -293,13 +297,51 @@ private void FirePartitionsAssignedHandlers(
IConsumer<byte[], byte[]> consumer,
List<TopicPartition> partitions)
{
this.Assignment = partitions;
if (_stopTheWorldStrategy)
{
this.Assignment = partitions;
this.Subscription = consumer.Subscription;
_flowManager.Start(consumer);
_partitionsAssignedHandlers.ForEach(handler => handler(_dependencyResolver, consumer, partitions));
return;
}

if (partitions.Count == 0)
{
return;
}

this.Assignment = this.Assignment.Union(partitions).ToArray();
this.Subscription = consumer.Subscription;
_flowManager.Stop();
_flowManager.Start(consumer);

_partitionsAssignedHandlers.ForEach(handler => handler(_dependencyResolver, consumer, partitions));
}

private void FirePartitionRevokedHandlers(IConsumer<byte[], byte[]> consumer, List<Confluent.Kafka.TopicPartitionOffset> partitions)
{
if (_stopTheWorldStrategy)
{
_partitionsRevokedHandlers.ForEach(handler => handler(_dependencyResolver, consumer, partitions));
this.Assignment = new List<TopicPartition>();
this.Subscription = new List<string>();
_currentPartitionsOffsets.Clear();
_flowManager.Stop();
return;
}

this.Assignment = this.Assignment.Except(partitions.Select(x => x.TopicPartition)).ToArray();
this.Subscription = consumer.Subscription;
foreach (var partition in partitions)
{
_currentPartitionsOffsets.TryRemove(partition.TopicPartition, out _);
}

_flowManager.Stop();
_flowManager.Start(consumer);
_partitionsRevokedHandlers.ForEach(handler => handler(_dependencyResolver, consumer, partitions));
}

private void InvalidateConsumer()
{
_consumer?.Close();
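For context on the incremental handlers above: a cooperative assignor reports only the delta of partitions in each rebalance callback, so the consumer unions newly assigned partitions into its assignment and subtracts revoked ones instead of resetting the whole list. A standalone sketch of that bookkeeping, with hypothetical topic and partition values:

using System;
using System.Collections.Generic;
using System.Linq;
using Confluent.Kafka;

var assignment = new[] { new TopicPartition("orders", 0), new TopicPartition("orders", 1) };

// Cooperative rebalance: only the newly assigned partition is reported.
var assigned = new List<TopicPartition> { new("orders", 2) };
assignment = assignment.Union(assigned).ToArray();                               // orders [0, 1, 2]

// Later, only the revoked partition is reported.
var revoked = new List<TopicPartitionOffset> { new(new TopicPartition("orders", 0), Offset.Unset) };
assignment = assignment.Except(revoked.Select(x => x.TopicPartition)).ToArray(); // orders [1, 2]

Console.WriteLine(string.Join(", ", assignment.Select(x => x.Partition.Value))); // 1, 2
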
src/KafkaFlow/Consumers/ConsumerManager.cs (36 additions & 5 deletions)
@@ -3,14 +3,17 @@
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Confluent.Kafka;
using KafkaFlow.Configuration;
using KafkaFlow.Extensions;

namespace KafkaFlow.Consumers;

internal class ConsumerManager : IConsumerManager
{
private readonly IDependencyResolver _dependencyResolver;
private readonly ILogHandler _logHandler;
private readonly bool _stopTheWorldStrategy;

private Timer _evaluateWorkersCountTimer;

@@ -26,7 +29,7 @@ public ConsumerManager(
this.Consumer = consumer;
this.WorkerPool = consumerWorkerPool;
this.Feeder = feeder;

_stopTheWorldStrategy = consumer.Configuration.GetKafkaConfig().PartitionAssignmentStrategy.IsStopTheWorldStrategy();
this.Consumer.OnPartitionsAssigned((_, _, partitions) => this.OnPartitionAssigned(partitions));
this.Consumer.OnPartitionsRevoked((_, _, partitions) => this.OnPartitionRevoked(partitions));
}
@@ -104,9 +107,23 @@ private void OnPartitionRevoked(IEnumerable<Confluent.Kafka.TopicPartitionOffset
{
_logHandler.Warning(
"Partitions revoked",
this.GetConsumerLogInfo(topicPartitions.Select(x => x.TopicPartition)));
this.GetConsumerLogInfo(topicPartitions?.Select(x => x.TopicPartition).ToArray()
?? Array.Empty<TopicPartition>()));

this.WorkerPool.StopAsync().GetAwaiter().GetResult();

if (_stopTheWorldStrategy)
{
return;
}

var assignedPartitions = Consumer.Assignment;
var workersCount = this.CalculateWorkersCount(assignedPartitions).GetAwaiter().GetResult();

this.WorkerPool
.StartAsync(assignedPartitions, workersCount)
.GetAwaiter()
.GetResult();
}

private void OnPartitionAssigned(IReadOnlyCollection<Confluent.Kafka.TopicPartition> partitions)
Expand All @@ -115,10 +132,16 @@ private void OnPartitionAssigned(IReadOnlyCollection<Confluent.Kafka.TopicPartit
"Partitions assigned",
this.GetConsumerLogInfo(partitions));

var workersCount = this.CalculateWorkersCount(partitions).GetAwaiter().GetResult();
if (_stopTheWorldStrategy is false)
{
this.WorkerPool.StopAsync().GetAwaiter().GetResult();
}

var assignedPartitions = Consumer.Assignment;
var workersCount = this.CalculateWorkersCount(assignedPartitions).GetAwaiter().GetResult();

this.WorkerPool
.StartAsync(partitions, workersCount)
.StartAsync(assignedPartitions, workersCount)
.GetAwaiter()
.GetResult();
}
@@ -127,7 +150,7 @@ private void OnPartitionAssigned(IReadOnlyCollection<Confluent.Kafka.TopicPartit
{
this.Consumer.Configuration.GroupId,
this.Consumer.Configuration.ConsumerName,
Topics = partitions
UpdatedTopics = partitions
.GroupBy(x => x.Topic)
.Select(
x => new
@@ -136,6 +159,14 @@ private void OnPartitionAssigned(IReadOnlyCollection<Confluent.Kafka.TopicPartit
PartitionsCount = x.Count(),
Partitions = x.Select(y => y.Partition.Value),
}),
CurrentTopics = Consumer.Assignment.GroupBy(x => x.Topic)
.Select(
x => new
{
x.First().Topic,
PartitionsCount = x.Count(),
Partitions = x.Select(y => y.Partition.Value),
}),
};

private async Task<int> CalculateWorkersCount(IEnumerable<Confluent.Kafka.TopicPartition> partitions)
src/KafkaFlow/Extensions/StrategyExtensions.cs (17 additions & 0 deletions)
@@ -0,0 +1,17 @@
using Confluent.Kafka;

namespace KafkaFlow.Extensions;

/// <summary>
/// Strategy extension methods.
/// </summary>
public static class StrategyExtensions
{
/// <summary>
/// Determine if the strategy is a "stop the world" behavior.
/// </summary>
/// <param name="strategy">Strategy</param>
/// <returns></returns>
public static bool IsStopTheWorldStrategy(this PartitionAssignmentStrategy? strategy) =>
strategy is null or PartitionAssignmentStrategy.Range or PartitionAssignmentStrategy.RoundRobin;
}
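
A quick usage sketch of the new extension; because it targets the nullable `PartitionAssignmentStrategy?`, the values below are declared as nullable, and the expected results follow directly from the expression above:

using System;
using Confluent.Kafka;
using KafkaFlow.Extensions;

PartitionAssignmentStrategy? unset = null;
PartitionAssignmentStrategy? roundRobin = PartitionAssignmentStrategy.RoundRobin;
PartitionAssignmentStrategy? cooperativeSticky = PartitionAssignmentStrategy.CooperativeSticky;

Console.WriteLine(unset.IsStopTheWorldStrategy());             // true (no strategy configured falls back to an eager assignor)
Console.WriteLine(roundRobin.IsStopTheWorldStrategy());        // true
Console.WriteLine(cooperativeSticky.IsStopTheWorldStrategy()); // false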
