@@ -21,7 +21,7 @@ class Kafka::KafkaController::InternalDeliveryReportCb : public RdKafka::Deliver
 public:
     ~InternalDeliveryReportCb()
     {
-        printf("DeliveryReportCb destroyed\n");
+        // printf("DeliveryReportCb destroyed\n");
     }
 
     void set_callback(std::function<void(RdKafka::Message &)> cb) {
@@ -49,7 +49,7 @@ class Kafka::KafkaController::InternalRebalanceCb : public RdKafka::RebalanceCb
 
     ~InternalRebalanceCb()
     {
-        printf("RebalanceCb destroyed\n");
+        // printf("RebalanceCb destroyed\n");
     }
 
     void set_callback(std::function<void(RdKafka::KafkaConsumer *, RdKafka::ErrorCode, std::vector<RdKafka::TopicPartition *> &)> cb) {
@@ -68,7 +68,7 @@ class Kafka::KafkaController::InternalLogger : public RdKafka::EventCb {
 
     ~InternalLogger()
    {
-        printf("Logger destroyed\n");
+        // printf("Logger destroyed\n");
    }
 
     void set_callback(std::function<void(const LogLevel logLevel, const std::string &message)> cb) {
@@ -134,6 +134,11 @@ static RdKafka::KafkaConsumer *create_consumer(const std::string &brokers, const
         return nullptr;
     }
 
+    // Set the auto offset reset to earliest.
+    if (conf->set("auto.offset.reset", "earliest", errstr) != RdKafka::Conf::CONF_OK) {
+        return nullptr;
+    }
+
     // Create a Rdkafka Consumer
     RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
     if (!consumer) {
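Note on the new `auto.offset.reset` setting: in librdkafka it only applies when the consumer group has no committed offset (or the committed offset is out of range); otherwise the consumer resumes from the group's stored position. A minimal sketch of the configuration path this hunk extends, assuming the same `brokers`/`group_id` parameters and surfacing `errstr` instead of silently returning `nullptr`:

```cpp
#include <librdkafka/rdkafkacpp.h>
#include <iostream>
#include <string>

// Sketch only: mirrors the create_consumer() configuration shown above.
static RdKafka::KafkaConsumer *create_consumer_sketch(const std::string &brokers,
                                                      const std::string &group_id) {
    std::string errstr;
    RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);

    if (conf->set("bootstrap.servers", brokers, errstr) != RdKafka::Conf::CONF_OK ||
        conf->set("group.id", group_id, errstr) != RdKafka::Conf::CONF_OK ||
        conf->set("auto.offset.reset", "earliest", errstr) != RdKafka::Conf::CONF_OK) {
        // Reporting errstr makes misconfiguration easier to diagnose.
        std::cerr << "consumer config error: " << errstr << std::endl;
        delete conf;
        return nullptr;
    }

    RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
    delete conf;  // KafkaConsumer::create() copies the configuration.
    return consumer;
}
```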
@@ -204,12 +209,15 @@ void Kafka::KafkaController::add_producer(const std::string &brokers, const std:
 
     // Set the delivery report callback
     deliveryReportCb->set_callback([this](RdKafka::Message &message) {
-        printf("Message delivered to partition %d at offset %" PRId64 "\n", message.partition(), message.offset());
+        // printf("Message delivered to partition %d at offset %" PRId64 "\n", message.partition(), message.offset());
 
         if (message.err())
         {
            if (m_error_callback)
                m_error_callback("Message delivery failed: " + RdKafka::err2str(message.err()));
+        } else {
+            if (m_log_callback && m_log_level >= LogLevel::DEBUG)
+                m_log_callback(LogLevel::DEBUG, "Message delivered to partition " + std::to_string(message.partition()) + " at offset " + std::to_string(message.offset()));
        }
    });
    loggerCb->set_callback([this](const LogLevel logLevel, const std::string &message) {
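Successful deliveries are now routed through the log callback at DEBUG level instead of `printf`. Keep in mind that librdkafka only invokes delivery report callbacks while the producer's queue is being served (via `poll()` or `flush()`). A hypothetical wiring of the callbacks on the application side — the setter name `set_log_callback()` is assumed, only `set_error_callback()` appears in this diff:

```cpp
#include <iostream>
#include <string>

#include "kafka_controller.h"   // assumed header name for Kafka::KafkaController

int main() {
    Kafka::KafkaController controller;

    // Hypothetical setter for m_log_callback; signature follows InternalLogger::set_callback.
    controller.set_log_callback([](const Kafka::LogLevel level, const std::string &message) {
        (void)level;   // level gating already happens inside the controller via m_log_level
        std::cerr << "[kafka] " << message << std::endl;
    });
    controller.set_error_callback([](const std::string &message) {
        std::cerr << "[kafka error] " << message << std::endl;
    });
    return 0;
}
```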
@@ -241,8 +249,7 @@ void Kafka::KafkaController::add_producer(const std::string &brokers, const std:
     m_producers[channel].push_back(producerContext);
 }
 
-void Kafka::KafkaController::add_consumer(const std::string &brokers, const std::string &topic, const std::string &group_id, const uint32_t channel)
-{
+void Kafka::KafkaController::add_consumer(const std::string &brokers, const std::vector<std::string> &topics, const std::string &group_id, const uint32_t channel) {
     std::string errstr;
     std::shared_ptr<InternalRebalanceCb> rebalance_cb;
     std::shared_ptr<InternalLogger> loggerCb;
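With the signature change, a single consumer can subscribe to several topics in one call. A possible call site (broker address, topic names, group id and channel are illustrative):

```cpp
// Illustrative values; `controller` is a Kafka::KafkaController instance.
controller.add_consumer("localhost:9092", {"orders", "payments"}, "example-group", /*channel=*/0);
```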
@@ -286,24 +293,19 @@ void Kafka::KafkaController::add_consumer(const std::string &brokers, const std:
         m_log_callback(logLevel, message);
     });
 
-    // Create topic handle
-    RdKafka::Topic *rd_topic = RdKafka::Topic::create(consumer, topic, nullptr, errstr);
+    // Subscribe.
+    consumer->subscribe(topics);
 
     // Create shared objects.
     std::shared_ptr<RdKafka::KafkaConsumer> consumer_shared(consumer);
-    std::shared_ptr<RdKafka::Topic> topic_shared(rd_topic);
     std::shared_ptr<InternalLogger> loggerCb_shared(loggerCb);
     std::shared_ptr<InternalRebalanceCb> rebalance_cb_shared(rebalance_cb);
 
     // Create Context, if it doesn't exist.
     if (m_consumers.find(channel) == m_consumers.end())
     {
-        // Create the thread.
-        std::thread *consumer_thread = new std::thread(&KafkaController::_consumer_thread, this, m_consumers[channel], channel);
-        std::shared_ptr<std::thread> thread_shared(consumer_thread);
-
         std::shared_ptr<ConsumerContext> consumerContext = std::make_shared<ConsumerContext>(
-            thread_shared,
+            channel,
             std::vector<std::shared_ptr<RdKafka::KafkaConsumer>>(),
             loggerCb_shared,
             rebalance_cb_shared
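`KafkaConsumer::subscribe()` returns an `RdKafka::ErrorCode`, which the new code discards. A small sketch of how the result could be surfaced through the error callback already used elsewhere in this file:

```cpp
// Sketch: check the subscribe() result instead of ignoring it.
RdKafka::ErrorCode sub_err = consumer->subscribe(topics);
if (sub_err != RdKafka::ERR_NO_ERROR) {
    if (m_error_callback)
        m_error_callback("Failed to subscribe: " + RdKafka::err2str(sub_err));
    return;
}
```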
@@ -313,7 +315,7 @@ void Kafka::KafkaController::add_consumer(const std::string &brokers, const std:
     }
 
     // Add the consumer to the vector.
-    m_consumers[channel]->get_consumers().push_back(consumer_shared);
+    m_consumers[channel]->add_consumer(consumer_shared);
 
     // Wait until the consumer is connected.
     // Timeout after the MAX_TIMEOUT_MS.
@@ -375,6 +377,8 @@ bool Kafka::KafkaController::send(const uint32_t channel, const void *data, cons
             nullptr);
     }
 
+    producer->get_producer()->flush(MAX_TIMEOUT_MS);
+
     if (err) {
         if (m_error_callback)
             m_error_callback("Failed to produce message: " + RdKafka::err2str(err));
@@ -451,11 +455,6 @@ void Kafka::KafkaController::set_error_callback(const std::function<void(const s
     m_error_callback = callback;
 }
 
-void Kafka::KafkaController::_consumer_thread(std::shared_ptr<ConsumerContext> &consumer, const uint32_t channel)
-{
-    std::printf("Consumer thread started: %s\n", std::to_string(channel).c_str());
-}
-
 /// ------------------------- Packet ------------------------- ///
 
 Kafka::Packet::~Packet() {
@@ -604,5 +603,34 @@ std::shared_ptr<Kafka::KafkaController::InternalRebalanceCb> Kafka::KafkaControl
 }
 
 std::vector<std::shared_ptr<RdKafka::KafkaConsumer>> Kafka::KafkaController::ConsumerContext::get_consumers() const {
-    return std::vector<std::shared_ptr<RdKafka::KafkaConsumer>>();
+    return consumers;
+}
+
+void Kafka::KafkaController::ConsumerContext::add_consumer(std::shared_ptr<RdKafka::KafkaConsumer> consumer) {
+    consumers.emplace_back(std::move(consumer));
+}
+
+void Kafka::KafkaController::ConsumerContext::_consumer_thread(const uint32_t channel)
+{
+
+    while (is_running())
+    {
+        std::vector<std::shared_ptr<Packet>> packets;
+        for (auto &consumer : consumers)
+        {
+            RdKafka::Message *message = consumer->consume(1000);
+            if (message->err() == RdKafka::ERR_NO_ERROR)
+            {
+                std::shared_ptr<Packet> packet = std::make_shared<Packet>(new int8_t[message->len()], message->len());
+                memcpy(packet->get_data(), message->payload(), packet->get_size());
+                packets.push_back(packet);
+            }
+            delete message;
+        }
+
+        for (auto &packet : packets) {
+            PacketNode *node = new PacketNode(packet);
+            push_next_packet(node);
+        }
+    }
 }
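How `ConsumerContext::_consumer_thread()` gets started and stopped is not shown in this hunk. One possible arrangement, assuming the context owns an `std::thread` member and an `std::atomic<bool>` backing `is_running()` (member names `m_thread`, `m_running`, and the `start()`/`stop()` methods are hypothetical, not taken from the diff):

```cpp
// Sketch only; requires <thread> and <atomic>. m_thread and m_running are assumed members.
void Kafka::KafkaController::ConsumerContext::start(const uint32_t channel) {
    m_running = true;
    m_thread = std::thread(&ConsumerContext::_consumer_thread, this, channel);
}

void Kafka::KafkaController::ConsumerContext::stop() {
    m_running = false;          // makes is_running() return false
    if (m_thread.joinable())
        m_thread.join();        // wait for the consume loop to exit
}
```

Note that each `consume(1000)` call can block for up to a second per consumer, so the loop's latency grows with the number of consumers on a channel, and the `Packet` takes ownership of the `new int8_t[]` buffer it is handed.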