@@ -36,7 +36,7 @@ class KafkaWritableFile : public CloudLogWritableFile {
         producer_(producer),
         topic_(topic),
         current_offset_(0) {
-    Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
         "[kafka] WritableFile opened file %s", fname_.c_str());
   }
 
@@ -72,20 +72,20 @@ Status KafkaWritableFile::ProduceRaw(const std::string& operation_name,
       message.size(), &fname_ /* Partitioning key */, nullptr);
 
   if (resp == RdKafka::ERR_NO_ERROR) {
-    Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
         "[kafka] WritableFile %s file %s %ld", fname_.c_str(),
         operation_name.c_str(), message.size());
     return Status::OK();
   } else if (resp == RdKafka::ERR__QUEUE_FULL) {
     const std::string formatted_err = RdKafka::err2str(resp);
-    Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
         "[kafka] WritableFile src %s %s error %s", fname_.c_str(),
         operation_name.c_str(), formatted_err.c_str());
 
     return Status::Busy(topic_->name().c_str(), RdKafka::err2str(resp).c_str());
   } else {
     const std::string formatted_err = RdKafka::err2str(resp);
-    Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
         "[kafka] WritableFile src %s %s error %s", fname_.c_str(),
         operation_name.c_str(), formatted_err.c_str());
@@ -106,7 +106,7 @@ Status KafkaWritableFile::Append(const Slice& data) {
 }
 
 Status KafkaWritableFile::Close() {
-  Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+  Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
       "[kafka] S3WritableFile closing %s", fname_.c_str());
 
   std::string serialized_data;
@@ -128,7 +128,7 @@ Status KafkaWritableFile::Flush() {
   while (status_.ok() && !(done = (producer_->outq_len() == 0)) &&
          !(timeout = (std::chrono::microseconds(env_->NowMicros()) - start >
                       kFlushTimeout))) {
-    Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
         "[kafka] WritableFile src %s "
         "Waiting on flush: Output queue length: %d",
         fname_.c_str(), producer_->outq_len());
@@ -137,23 +137,23 @@ Status KafkaWritableFile::Flush() {
   }
 
   if (done) {
-    Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
         "[kafka] WritableFile src %s Flushed", fname_.c_str());
   } else if (timeout) {
-    Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
         "[kafka] WritableFile src %s Flushing timed out after %" PRId64 " us",
         fname_.c_str(), kFlushTimeout.count());
     status_ = Status::TimedOut();
   } else {
-    Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
         "[kafka] WritableFile src %s Flush interrupted", fname_.c_str());
   }
 
   return status_;
 }
 
 Status KafkaWritableFile::LogDelete() {
-  Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_, "[kafka] LogDelete %s",
+  Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(), "[kafka] LogDelete %s",
       fname_.c_str());
 
   std::string serialized_data;
@@ -176,7 +176,7 @@ class KafkaController : public CloudLogControllerImpl {
       consumer_->stop(consumer_topic_.get(), partitions_[i]->partition());
     }
 
-    Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
         "[%s] KafkaController closed.", Name());
   }
 
@@ -237,7 +237,7 @@ Status KafkaController::Initialize(CloudEnv* env) {
     s = Status::InvalidArgument("Failed adding specified conf to Kafka conf",
                                 conf_errstr.c_str());
 
-    Log(InfoLogLevel::ERROR_LEVEL, env->info_log_,
+    Log(InfoLogLevel::ERROR_LEVEL, env->GetLogger(),
         "[aws] NewAwsEnv Kafka conf set error: %s", s.ToString().c_str());
     return s;
   }
@@ -250,18 +250,18 @@ Status KafkaController::Initialize(CloudEnv* env) {
     s = Status::InvalidArgument("Failed creating Kafka producer",
                                 producer_errstr.c_str());
 
-    Log(InfoLogLevel::ERROR_LEVEL, env->info_log_,
+    Log(InfoLogLevel::ERROR_LEVEL, env->GetLogger(),
         "[%s] Kafka producer error: %s", Name(), s.ToString().c_str());
   } else if (!consumer_) {
     s = Status::InvalidArgument("Failed creating Kafka consumer",
                                 consumer_errstr.c_str());
 
-    Log(InfoLogLevel::ERROR_LEVEL, env->info_log_,
+    Log(InfoLogLevel::ERROR_LEVEL, env->GetLogger(),
         "[%s] Kafka consumer error: %s", Name(), s.ToString().c_str());
   } else {
     const std::string topic_name = env->GetSrcBucketName();
 
-    Log(InfoLogLevel::DEBUG_LEVEL, env->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env->GetLogger(),
         "[%s] KafkaController opening stream %s using cachedir '%s'", Name(),
         topic_name.c_str(), cache_dir_.c_str());
 
@@ -288,8 +288,9 @@ Status KafkaController::TailStream() {
     return status_;
   }
 
-  Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_, "[%s] TailStream topic %s %s",
-      Name(), consumer_topic_->name().c_str(), status_.ToString().c_str());
+  Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
+      "[%s] TailStream topic %s %s", Name(), consumer_topic_->name().c_str(),
+      status_.ToString().c_str());
 
   Status lastErrorStatus;
   int retryAttempt = 0;
@@ -311,13 +312,13 @@ Status KafkaController::TailStream() {
         // Apply the payload to local filesystem
         status_ = Apply(sl);
         if (!status_.ok()) {
-          Log(InfoLogLevel::ERROR_LEVEL, env_->info_log_,
+          Log(InfoLogLevel::ERROR_LEVEL, env_->GetLogger(),
               "[%s] error processing message size %ld "
               "extracted from stream %s %s",
               Name(), message->len(), consumer_topic_->name().c_str(),
               status_.ToString().c_str());
         } else {
-          Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+          Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
               "[%s] successfully processed message size %ld "
               "extracted from stream %s %s",
               Name(), message->len(), consumer_topic_->name().c_str(),
@@ -338,7 +339,7 @@ Status KafkaController::TailStream() {
             Status::IOError(consumer_topic_->name().c_str(),
                             RdKafka::err2str(message->err()).c_str());
 
-        Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+        Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
             "[%s] error reading %s %s", Name(), consumer_topic_->name().c_str(),
             RdKafka::err2str(message->err()).c_str());
 
@@ -347,7 +348,7 @@
       }
     }
   }
-  Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+  Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
       "[%s] TailStream topic %s finished: %s", Name(),
       consumer_topic_->name().c_str(), status_.ToString().c_str());
 
@@ -369,7 +370,7 @@ Status KafkaController::InitializePartitions() {
     status_ = Status::IOError(consumer_topic_->name().c_str(),
                               RdKafka::err2str(err).c_str());
 
-    Log(InfoLogLevel::DEBUG_LEVEL, env_->info_log_,
+    Log(InfoLogLevel::DEBUG_LEVEL, env_->GetLogger(),
         "[%s] S3ReadableFile file %s Unable to find shards %s", Name(),
         consumer_topic_->name().c_str(), status_.ToString().c_str());
 