Enhancement/310 #340

Merged · 9 commits · Jul 3, 2024
3 changes: 2 additions & 1 deletion core/Kafka/IRecordCollector.cs
@@ -1,4 +1,4 @@
using System.Collections.Generic;
using System.Collections.Generic;
using Confluent.Kafka;
using Streamiz.Kafka.Net.SerDes;

@@ -12,5 +12,6 @@ internal interface IRecordCollector
void Close();
void Send<K, V>(string topic, K key, V value, Headers headers, long timestamp, ISerDes<K> keySerializer, ISerDes<V> valueSerializer);
void Send<K, V>(string topic, K key, V value, Headers headers, int partition, long timestamp, ISerDes<K> keySerializer, ISerDes<V> valueSerializer);
int PartitionsFor(string topic);
}
}
1 change: 0 additions & 1 deletion core/Kafka/Internal/DefaultKafkaClientSupplier.cs
@@ -110,7 +110,6 @@ public IProducer<byte[], byte[]> GetProducer(ProducerConfig config)
producerStatisticsHandler.Publish(statistics);
});
}

return builder.Build();
}

30 changes: 26 additions & 4 deletions core/Kafka/Internal/RecordCollector.cs
@@ -7,6 +7,7 @@
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using Microsoft.Extensions.Logging;
using Streamiz.Kafka.Net.Metrics;
@@ -81,24 +82,28 @@ public void Clear()
private readonly IStreamConfig configuration;
private readonly TaskId id;
private readonly Sensor droppedRecordsSensor;
private readonly IAdminClient _adminClient;
private Exception exception = null;

private readonly string logPrefix;
private readonly ILogger log = Logger.GetLogger(typeof(RecordCollector));

private readonly ConcurrentDictionary<TopicPartition, long> collectorsOffsets =
new ConcurrentDictionary<TopicPartition, long>();
private readonly ConcurrentDictionary<TopicPartition, long> collectorsOffsets = new();

private readonly RetryRecordContext retryRecordContext = new RetryRecordContext();
private readonly RetryRecordContext retryRecordContext = new();

public IDictionary<TopicPartition, long> CollectorOffsets => collectorsOffsets.ToDictionary();

public RecordCollector(string logPrefix, IStreamConfig configuration, TaskId id, Sensor droppedRecordsSensor)
public IDictionary<string, (int, DateTime)> cachePartitionsForTopics =
new Dictionary<string, (int, DateTime)>();

public RecordCollector(string logPrefix, IStreamConfig configuration, TaskId id, Sensor droppedRecordsSensor, IAdminClient adminClient)
{
this.logPrefix = $"{logPrefix}";
this.configuration = configuration;
this.id = id;
this.droppedRecordsSensor = droppedRecordsSensor;
_adminClient = adminClient;
}

public void Init(ref IProducer<byte[], byte[]> producer)
@@ -136,6 +141,8 @@ public void Close()
}
}
}

_adminClient?.Dispose();
}

public void Flush()
@@ -416,5 +423,20 @@ private void CheckForException()
throw e;
}
}

public int PartitionsFor(string topic)
{
var adminConfig = configuration.ToAdminConfig("");
var refreshInterval = adminConfig.TopicMetadataRefreshIntervalMs ?? 5 * 60 * 1000;

if (cachePartitionsForTopics.ContainsKey(topic) &&
cachePartitionsForTopics[topic].Item2 + TimeSpan.FromMilliseconds(refreshInterval) > DateTime.Now)
return cachePartitionsForTopics[topic].Item1;

var metadata = _adminClient.GetMetadata(topic, TimeSpan.FromSeconds(5));
var partitionCount = metadata.Topics.FirstOrDefault(t => t.Topic.Equals(topic))!.Partitions.Count;
cachePartitionsForTopics[topic] = (partitionCount, DateTime.Now);
return partitionCount;
}
}
}
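For readers skimming the diff: `PartitionsFor` caches the partition count per topic and only re-queries the admin client once `TopicMetadataRefreshIntervalMs` (5 minutes by default) has elapsed. Below is a standalone sketch of that caching pattern; the names are illustrative and not part of this PR, and the real implementation lives in `RecordCollector` and delegates the metadata call to `IAdminClient`.

```csharp
using System;
using System.Collections.Generic;

// Standalone sketch of the caching pattern used by PartitionsFor (illustrative only).
internal class PartitionCountCache
{
    private readonly Dictionary<string, (int Count, DateTime FetchedAt)> cache = new();
    private readonly TimeSpan refreshInterval;
    private readonly Func<string, int> fetchPartitionCount; // e.g. an admin metadata lookup

    public PartitionCountCache(TimeSpan refreshInterval, Func<string, int> fetchPartitionCount)
    {
        this.refreshInterval = refreshInterval;
        this.fetchPartitionCount = fetchPartitionCount;
    }

    public int PartitionsFor(string topic)
    {
        // Serve from cache while the entry is still fresh.
        if (cache.TryGetValue(topic, out var entry) &&
            entry.FetchedAt + refreshInterval > DateTime.Now)
            return entry.Count;

        int count = fetchPartitionCount(topic);
        cache[topic] = (count, DateTime.Now); // indexer handles both insert and refresh
        return count;
    }
}
```

Using the indexer on refresh avoids the duplicate-key exception that `Dictionary.Add` would throw when an expired entry is replaced.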
2 changes: 1 addition & 1 deletion core/Metrics/StreamMetric.cs
@@ -15,7 +15,7 @@ public class StreamMetric
private readonly IMetricValueProvider metricValueProvider;
private readonly MetricConfig config;

private object lastValue;
private object lastValue = -1L;

internal StreamMetric(
MetricName metricName,
16 changes: 12 additions & 4 deletions core/Mock/Kafka/MockCluster.cs
@@ -756,7 +756,12 @@ internal ConsumeResult<byte[], byte[]> Consume(MockConsumer mockConsumer, TimeSp
Topic = p.Topic,
Partition = p.Partition,
Message = new Message<byte[], byte[]>
{Key = record.Key, Value = record.Value}
{
Key = record.Key,
Value = record.Value,
Timestamp = new Timestamp(
record.Timestamp ?? DateTime.Now, TimestampType.NotAvailable)
}
};
++offset.OffsetConsumed;
log.LogDebug($"Consumer {mockConsumer.Name} consume message from topic/partition {p}, offset {offset.OffsetConsumed}");
@@ -795,7 +800,7 @@ internal DeliveryReport<byte[], byte[]> Produce(string topic, Message<byte[], by
else
partition = Math.Abs(MurMurHash3.Hash(new MemoryStream(message.Key))) % topics[topic].PartitionNumber;

topics[topic].AddMessage(message.Key, message.Value, partition);
topics[topic].AddMessage(message.Key, message.Value, partition, message.Timestamp.UnixTimestampMs);

r.Message = message;
r.Partition = partition;
@@ -809,18 +814,21 @@ internal DeliveryReport<byte[], byte[]> Produce(string topic, Message<byte[], by

internal DeliveryReport<byte[], byte[]> Produce(TopicPartition topicPartition, Message<byte[], byte[]> message)
{
if (topicPartition.Partition.Equals(Partition.Any))
return Produce(topicPartition.Topic, message);

DeliveryReport<byte[], byte[]> r = new DeliveryReport<byte[], byte[]>();
r.Status = PersistenceStatus.NotPersisted;
CreateTopic(topicPartition.Topic);
if (topics[topicPartition.Topic].PartitionNumber > topicPartition.Partition)
{
topics[topicPartition.Topic].AddMessage(message.Key, message.Value, topicPartition.Partition);
topics[topicPartition.Topic].AddMessage(message.Key, message.Value, topicPartition.Partition, message.Timestamp.UnixTimestampMs);
r.Status = PersistenceStatus.Persisted;
}
else
{
topics[topicPartition.Topic].CreateNewPartitions(topicPartition.Partition);
topics[topicPartition.Topic].AddMessage(message.Key, message.Value, topicPartition.Partition);
topics[topicPartition.Topic].AddMessage(message.Key, message.Value, topicPartition.Partition, message.Timestamp.UnixTimestampMs);
r.Status = PersistenceStatus.Persisted;
}

13 changes: 9 additions & 4 deletions core/Mock/Kafka/MockPartition.cs
@@ -7,7 +7,7 @@ namespace Streamiz.Kafka.Net.Mock.Kafka
{
internal class MockPartition
{
private readonly List<(byte[], byte[])> log = new();
private readonly List<(byte[], byte[], long)> log = new();
private readonly Dictionary<long, long> mappingOffsets = new();

public MockPartition(int indice)
@@ -21,10 +21,10 @@ public MockPartition(int indice)
public long LowOffset { get; private set; } = Offset.Unset;
public long HighOffset { get; private set; } = Offset.Unset;

internal void AddMessageInLog(byte[] key, byte[] value)
internal void AddMessageInLog(byte[] key, byte[] value, long timestamp)
{
mappingOffsets.Add(Size, log.Count);
log.Add((key, value));
log.Add((key, value, timestamp));
++Size;
UpdateOffset();
}
@@ -43,7 +43,12 @@ internal TestRecord<byte[], byte[]> GetMessage(long offset)
if (mappingOffsets.ContainsKey(offset))
{
var record = log[(int) mappingOffsets[offset]];
return new TestRecord<byte[], byte[]> {Key = record.Item1, Value = record.Item2};
return new TestRecord<byte[], byte[]>
{
Key = record.Item1,
Value = record.Item2,
Timestamp = record.Item3.FromMilliseconds()
};
}

return null;
4 changes: 2 additions & 2 deletions core/Mock/Kafka/MockTopic.cs
@@ -23,9 +23,9 @@ public MockTopic(string topic, int part)
public int PartitionNumber { get; private set; }
public IEnumerable<MockPartition> Partitions => partitions.AsReadOnly();

public void AddMessage(byte[] key, byte[] value, int partition)
public void AddMessage(byte[] key, byte[] value, int partition, long timestamp = 0)
{
partitions[partition].AddMessageInLog(key, value);
partitions[partition].AddMessageInLog(key, value, timestamp);
}

public TestRecord<byte[], byte[]> GetMessage(int partition, long consumerOffset)
5 changes: 3 additions & 2 deletions core/Mock/TaskSynchronousTopologyDriver.cs
@@ -78,13 +78,13 @@ public TaskSynchronousTopologyDriver(string clientId, InternalTopologyBuilder to
partitionsByTaskId.Add(taskId, new List<TopicPartition> {part});
}

var adminClient = this.supplier.GetAdmin(configuration.ToAdminConfig($"{clientId}-admin"));
ProcessorTopology globalTaskTopology = topologyBuilder.BuildGlobalStateTopology();
hasGlobalTopology = globalTaskTopology != null;
if (hasGlobalTopology)
{
var globalConsumer =
this.supplier.GetGlobalConsumer(configuration.ToGlobalConsumerConfig($"{clientId}-global-consumer"));
var adminClient = this.supplier.GetAdmin(configuration.ToAdminConfig($"{clientId}-admin"));
var stateManager =
new GlobalStateManager(globalConsumer, globalTaskTopology, adminClient, configuration);
globalProcessorContext = new GlobalProcessorContext(configuration, stateManager, metricsRegistry);
@@ -105,7 +105,8 @@ public TaskSynchronousTopologyDriver(string clientId, InternalTopologyBuilder to
topologyBuilder.BuildTopology(taskId).GetSourceProcessor(requestTopic),
this.supplier.GetProducer(configuration.ToProducerConfig($"ext-thread-producer-{requestTopic}")),
configuration,
metricsRegistry));
metricsRegistry,
adminClient));
}
}

28 changes: 28 additions & 0 deletions core/Processors/DefaultStreamPartitioner.cs
@@ -0,0 +1,28 @@
using Confluent.Kafka;

namespace Streamiz.Kafka.Net.Processors
{
/// <summary>
/// Forwards the record's source partition as the sink partition if the sink topic has enough partitions; otherwise Partition.Any
/// </summary>
/// <typeparam name="K">Key record type</typeparam>
/// <typeparam name="V">Value record type</typeparam>
public class DefaultStreamPartitioner<K, V> : IStreamPartitioner<K, V>
{
/// <summary>
/// Function used to determine how records are distributed among partitions of the topic
/// </summary>
/// <param name="topic">Sink topic name</param>
/// <param name="key">record's key</param>
/// <param name="value">record's value</param>
/// <param name="sourcePartition">record's source partition</param>
/// <param name="numPartitions">number partitions of the sink topic</param>
/// <returns>Return the source partition as the sink partition of the record if there is enough sink partitions, Partition.Any otherwise</returns>
public Partition Partition(string topic, K key, V value, Partition sourcePartition, int numPartitions)
{
if (sourcePartition.Value <= numPartitions - 1)
return sourcePartition;
return Confluent.Kafka.Partition.Any;
}
}
}
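A quick illustration of the fallback behaviour described in the summary; topic, key and value are arbitrary placeholders, not part of the PR.

```csharp
using Confluent.Kafka;
using Streamiz.Kafka.Net.Processors;

var partitioner = new DefaultStreamPartitioner<string, string>();

// Source partition 2 exists in a 4-partition sink topic -> forwarded unchanged.
Partition kept = partitioner.Partition("sink-topic", "key", "value", new Partition(2), 4);

// Source partition 5 does not exist in a 3-partition sink topic -> Partition.Any.
Partition fallback = partitioner.Partition("sink-topic", "key", "value", new Partition(5), 3);
```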
5 changes: 3 additions & 2 deletions core/Processors/ExternalProcessorTopologyExecutor.cs
@@ -125,7 +125,8 @@ public ExternalProcessorTopologyExecutor(
ISourceProcessor sourceProcessor,
IProducer<byte[], byte[]> producer,
IStreamConfig config,
StreamMetricsRegistry streamMetricsRegistry)
StreamMetricsRegistry streamMetricsRegistry,
IAdminClient adminClient)
{
this.threadId = threadId;
this.producerId = producer.Name;
@@ -140,7 +141,7 @@ public ExternalProcessorTopologyExecutor(
processSensor = TaskMetrics.ProcessSensor(threadId, taskId, streamMetricsRegistry);
processLatencySensor = TaskMetrics.ProcessLatencySensor(threadId, taskId, streamMetricsRegistry);

recordCollector = new RecordCollector(logPrefix, config, taskId, droppedRecordsSensor);
recordCollector = new RecordCollector(logPrefix, config, taskId, droppedRecordsSensor, adminClient);
recordCollector.Init(ref producer);

context = new ProcessorContext(ExternalStreamTask.Create(taskId), config, null, streamMetricsRegistry);
3 changes: 2 additions & 1 deletion core/Processors/ExternalStreamThread.cs
@@ -372,7 +372,8 @@ private ExternalProcessorTopologyExecutor GetExternalProcessorTopology(string to
topology.GetSourceProcessor(topic),
producer,
configuration,
streamMetricsRegistry);
streamMetricsRegistry,
adminClient);
externalProcessorTopologies.Add(topic, externalProcessorTopologyExecutor);

return externalProcessorTopologyExecutor;
18 changes: 18 additions & 0 deletions core/Processors/IStreamPartitioner.cs
@@ -0,0 +1,18 @@
using Confluent.Kafka;

namespace Streamiz.Kafka.Net.Processors
{
public interface IStreamPartitioner<K, V>
{
/// <summary>
/// Function used to determine how records are distributed among partitions of the topic
/// </summary>
/// <param name="topic">Sink topic name</param>
/// <param name="key">record's key</param>
/// <param name="value">record's value</param>
/// <param name="sourcePartition">record's source partition</param>
/// <param name="numPartitions">number partitions of the sink topic</param>
/// <returns>Return the destination partition for the current record</returns>
Partition Partition(string topic, K key, V value, Partition sourcePartition, int numPartitions);
}
}
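A hypothetical custom implementation, shown only to illustrate the contract; key-hash routing is not part of this PR.

```csharp
using Confluent.Kafka;
using Streamiz.Kafka.Net.Processors;

// Route records by key hash, ignoring the source partition (illustrative only).
public class KeyHashPartitioner<V> : IStreamPartitioner<string, V>
{
    public Partition Partition(string topic, string key, V value,
        Partition sourcePartition, int numPartitions)
    {
        if (key == null || numPartitions <= 0)
            return Confluent.Kafka.Partition.Any;

        int hash = key.GetHashCode() & int.MaxValue; // non-negative hash
        return new Confluent.Kafka.Partition(hash % numPartitions);
    }
}
```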
19 changes: 7 additions & 12 deletions core/Processors/Internal/NodeFactory.cs
@@ -80,21 +80,16 @@ internal class SinkNodeFactory<K, V> : NodeFactory, ISinkNodeFactory
public IRecordTimestampExtractor<K, V> TimestampExtractor { get; }
public ISerDes<K> KeySerdes { get; }
public ISerDes<V> ValueSerdes { get; }
public Func<string, K, V, int> ProducedPartitioner { get; }
public IStreamPartitioner<K, V> ProducedPartitioner { get; }

public string Topic
{
get
{
return TopicExtractor is StaticTopicNameExtractor<K, V> ?
((StaticTopicNameExtractor<K, V>)TopicExtractor).TopicName :
null;
}
}
public string Topic =>
(TopicExtractor as StaticTopicNameExtractor<K, V>)?.TopicName;

public SinkNodeFactory(string name, string[] previous, ITopicNameExtractor<K, V> topicExtractor,
public SinkNodeFactory(string name, string[] previous, ITopicNameExtractor<K, V> topicExtractor,
IRecordTimestampExtractor<K, V> timestampExtractor,
ISerDes<K> keySerdes, ISerDes<V> valueSerdes, Func<string, K, V, int> producedPartitioner)
ISerDes<K> keySerdes,
ISerDes<V> valueSerdes,
IStreamPartitioner<K, V> producedPartitioner)
: base(name, previous)
{
TopicExtractor = topicExtractor;
18 changes: 18 additions & 0 deletions core/Processors/Internal/WrapperStreamPartitioner.cs
@@ -0,0 +1,18 @@
using System;
using Confluent.Kafka;

namespace Streamiz.Kafka.Net.Processors.Internal
{
internal class WrapperStreamPartitioner<K, V> : IStreamPartitioner<K, V>
{
private readonly Func<string, K, V, Partition, int, Partition> _partitioner;

public WrapperStreamPartitioner(Func<string, K, V, Partition, int, Partition> partitioner)
{
_partitioner = partitioner;
}

public Partition Partition(string topic, K key, V value, Partition sourcePartition, int numPartitions)
=> _partitioner(topic, key, value, sourcePartition, numPartitions);
}
}
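The wrapper simply adapts a delegate to `IStreamPartitioner`. A usage sketch follows; since the class is internal to the library, this is illustrative only.

```csharp
using Confluent.Kafka;
using Streamiz.Kafka.Net.Processors;
using Streamiz.Kafka.Net.Processors.Internal;

IStreamPartitioner<string, string> partitioner =
    new WrapperStreamPartitioner<string, string>(
        (topic, key, value, sourcePartition, numPartitions) =>
            new Partition(0)); // pin every record to partition 0

Partition chosen = partitioner.Partition("sink-topic", "key", "value", new Partition(3), 4); // -> 0
```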
27 changes: 22 additions & 5 deletions core/Processors/SinkProcessor.cs
@@ -1,4 +1,5 @@
using System;
using Confluent.Kafka;
using Microsoft.Extensions.Logging;
using Streamiz.Kafka.Net.Errors;
using Streamiz.Kafka.Net.Processors.Internal;
@@ -15,9 +16,15 @@ internal class SinkProcessor<K, V> : AbstractProcessor<K, V>, ISinkProcessor
{
private ITopicNameExtractor<K, V> topicNameExtractor;
private readonly IRecordTimestampExtractor<K, V> timestampExtractor;
private readonly Func<string, K, V, int> partitioner;
private readonly IStreamPartitioner<K, V> partitioner;

internal SinkProcessor(string name, ITopicNameExtractor<K, V> topicNameExtractor, IRecordTimestampExtractor<K, V> timestampExtractor, ISerDes<K> keySerdes, ISerDes<V> valueSerdes, Func<string, K, V, int> partitioner = null)
internal SinkProcessor(
string name,
ITopicNameExtractor<K, V> topicNameExtractor,
IRecordTimestampExtractor<K, V> timestampExtractor,
ISerDes<K> keySerdes,
ISerDes<V> valueSerdes,
IStreamPartitioner<K, V> partitioner = null)
: base(name, keySerdes, valueSerdes)
{
this.topicNameExtractor = topicNameExtractor;
@@ -62,11 +69,21 @@ public override void Process(K key, V value)
{
throw new StreamsException($"Invalid (negative) timestamp of {timestamp} for output record <{key}:{value}>.");
}

if (partitioner != null)
{
int partition = partitioner.Invoke(topicName, key, value);
Context.RecordCollector.Send(topicName, key, value, Context.RecordContext.Headers, partition, timestamp, KeySerDes,
Partition partition = partitioner.Partition(
topicName,
key, value,
Context.Partition,
Context.RecordCollector.PartitionsFor(topicName));

Context.RecordCollector.Send(
topicName,
key, value,
Context.RecordContext.Headers,
partition,
timestamp, KeySerDes,
ValueSerDes);
}
else