From e2264c254b0d26a574ce928c869246e547dd614a Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Mon, 24 Mar 2025 20:18:50 -0500 Subject: [PATCH 01/14] =?UTF-8?q?=F0=9F=9A=8F=20run=20nan=20->=20napi=20mi?= =?UTF-8?q?gration=20script?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- binding.gyp | 14 +- .../package.json | 1 + .../package.json | 1 + examples/package.json | 1 + examples/performance/package.json | 1 + examples/typescript/package.json | 1 + package.json | 2 +- schemaregistry/package.json | 1 + src/admin.cc | 444 +++++----- src/admin.h | 39 +- src/binding.cc | 34 +- src/binding.h | 3 +- src/callbacks.cc | 199 +++-- src/callbacks.h | 17 +- src/common.cc | 789 +++++++++--------- src/common.h | 73 +- src/config.cc | 35 +- src/config.h | 7 +- src/connection.cc | 246 +++--- src/connection.h | 27 +- src/errors.cc | 34 +- src/errors.h | 11 +- src/kafka-consumer.cc | 684 ++++++++------- src/kafka-consumer.h | 65 +- src/producer.cc | 435 +++++----- src/producer.h | 42 +- src/topic.cc | 75 +- src/topic.h | 21 +- src/workers.cc | 531 ++++++------ src/workers.h | 203 ++--- 30 files changed, 2093 insertions(+), 1943 deletions(-) diff --git a/binding.gyp b/binding.gyp index f5b7e40e..373cb766 100644 --- a/binding.gyp +++ b/binding.gyp @@ -8,6 +8,15 @@ "targets": [ { "target_name": "confluent-kafka-javascript", + "cflags!": [ "-fno-exceptions" ], + "cflags_cc!": [ "-fno-exceptions" ], + "xcode_settings": { "GCC_ENABLE_CPP_EXCEPTIONS": "YES", + "CLANG_CXX_LIBRARY": "libc++", + "MACOSX_DEPLOYMENT_TARGET": "10.7", + }, + "msvs_settings": { + "VCCLCompilerTool": { "ExceptionHandling": 1 }, + }, 'sources': [ 'src/binding.cc', 'src/callbacks.cc', @@ -22,7 +31,7 @@ 'src/admin.cc' ], "include_dirs": [ - "=18.0.0" diff --git a/schemaregistry/package.json b/schemaregistry/package.json index 4a0c8f16..1d5b1b15 100644 --- a/schemaregistry/package.json +++ b/schemaregistry/package.json @@ -30,6 +30,7 @@ "uuid": "^10.0.0" }, "dependencies": { + "node-addon-api": "8.3.1", "@aws-sdk/client-kms": "^3.637.0", "@aws-sdk/credential-providers": "^3.637.0", "@azure/identity": "^4.4.1", diff --git a/src/admin.cc b/src/admin.cc index 568cf710..997a337e 100644 --- a/src/admin.cc +++ b/src/admin.cc @@ -16,7 +16,7 @@ #include "src/workers.h" -using Nan::FunctionCallbackInfo; +using Napi::FunctionCallbackInfo; namespace NodeKafka { @@ -98,83 +98,89 @@ Baton AdminClient::Disconnect() { return Baton(RdKafka::ERR_NO_ERROR); } -Nan::Persistent AdminClient::constructor; +Napi::FunctionReference AdminClient::constructor; -void AdminClient::Init(v8::Local exports) { - Nan::HandleScope scope; +void AdminClient::Init(Napi::Object exports) { + Napi::HandleScope scope(env); + + Napi::FunctionReference tpl = Napi::Function::New(env, New); + tpl->SetClassName(Napi::String::New(env, "AdminClient")); - v8::Local tpl = Nan::New(New); - tpl->SetClassName(Nan::New("AdminClient").ToLocalChecked()); - tpl->InstanceTemplate()->SetInternalFieldCount(1); // Inherited from NodeKafka::Connection - Nan::SetPrototypeMethod(tpl, "configureCallbacks", NodeConfigureCallbacks); - Nan::SetPrototypeMethod(tpl, "name", NodeName); + InstanceMethod("configureCallbacks", &NodeConfigureCallbacks), + InstanceMethod("name", &NodeName), // Admin client operations - Nan::SetPrototypeMethod(tpl, "createTopic", NodeCreateTopic); - Nan::SetPrototypeMethod(tpl, "deleteTopic", NodeDeleteTopic); - Nan::SetPrototypeMethod(tpl, "createPartitions", NodeCreatePartitions); - Nan::SetPrototypeMethod(tpl, "deleteRecords", 
NodeDeleteRecords); - Nan::SetPrototypeMethod(tpl, "describeTopics", NodeDescribeTopics); - Nan::SetPrototypeMethod(tpl, "listOffsets", NodeListOffsets); + InstanceMethod("createTopic", &NodeCreateTopic), + InstanceMethod("deleteTopic", &NodeDeleteTopic), + InstanceMethod("createPartitions", &NodeCreatePartitions), + InstanceMethod("deleteRecords", &NodeDeleteRecords), + InstanceMethod("describeTopics", &NodeDescribeTopics), + InstanceMethod("listOffsets", &NodeListOffsets), // Consumer group related operations - Nan::SetPrototypeMethod(tpl, "listGroups", NodeListGroups); - Nan::SetPrototypeMethod(tpl, "describeGroups", NodeDescribeGroups); - Nan::SetPrototypeMethod(tpl, "deleteGroups", NodeDeleteGroups); - Nan::SetPrototypeMethod(tpl, "listConsumerGroupOffsets", + InstanceMethod("listGroups", &NodeListGroups), + InstanceMethod("describeGroups", &NodeDescribeGroups), + InstanceMethod("deleteGroups", &NodeDeleteGroups), + Napi::SetPrototypeMethod(tpl, "listConsumerGroupOffsets", NodeListConsumerGroupOffsets); - Nan::SetPrototypeMethod(tpl, "connect", NodeConnect); - Nan::SetPrototypeMethod(tpl, "disconnect", NodeDisconnect); - Nan::SetPrototypeMethod(tpl, "setSaslCredentials", NodeSetSaslCredentials); - Nan::SetPrototypeMethod(tpl, "getMetadata", NodeGetMetadata); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerToken", NodeSetOAuthBearerToken); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", + InstanceMethod("connect", &NodeConnect), + InstanceMethod("disconnect", &NodeDisconnect), + InstanceMethod("setSaslCredentials", &NodeSetSaslCredentials), + InstanceMethod("getMetadata", &NodeGetMetadata), + InstanceMethod("setOAuthBearerToken", &NodeSetOAuthBearerToken), + Napi::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", NodeSetOAuthBearerTokenFailure); constructor.Reset( - (tpl->GetFunction(Nan::GetCurrentContext())).ToLocalChecked()); - Nan::Set(exports, Nan::New("AdminClient").ToLocalChecked(), - tpl->GetFunction(Nan::GetCurrentContext()).ToLocalChecked()); + (tpl->GetFunction(Napi::GetCurrentContext()))); + (exports).Set(Napi::String::New(env, "AdminClient"), + tpl->GetFunction(Napi::GetCurrentContext())); } -void AdminClient::New(const Nan::FunctionCallbackInfo& info) { +void AdminClient::New(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); if (!info.IsConstructCall()) { - return Nan::ThrowError("non-constructor invocation not supported"); + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + return env.Null(); } if (info.Length() < 1) { - return Nan::ThrowError("You must supply a global configuration or a preexisting client"); // NOLINT + Napi::Error::New(env, "You must supply a global configuration or a preexisting client").ThrowAsJavaScriptException(); + return env.Null(); // NOLINT } Connection *connection = NULL; Conf *gconfig = NULL; AdminClient *client = NULL; - if (info.Length() >= 3 && !info[2]->IsNull() && !info[2]->IsUndefined()) { - if (!info[2]->IsObject()) { - return Nan::ThrowError("Third argument, if provided, must be a client object"); // NOLINT + if (info.Length() >= 3 && !info[2].IsNull() && !info[2].IsUndefined()) { + if (!info[2].IsObject()) { + Napi::Error::New(env, "Third argument, if provided, must be a client object").ThrowAsJavaScriptException(); + return env.Null(); // NOLINT } // We check whether this is a wrapped object within the calling JavaScript // code, so it's safe to unwrap it here. We Unwrap it directly into a // Connection object, since it's OK to unwrap into the parent class. 
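// ---------------------------------------------------------------------------
// Editor's note: a minimal sketch of the unwrap idiom this call needs once the
// classes derive from Napi::ObjectWrap<T> (an assumption; this is commit 01/14
// and the class conversion is not part of the script run):
//
//   Napi::Object wrapped = info[2].As<Napi::Object>();
//   Connection* connection = Napi::ObjectWrap<Connection>::Unwrap(wrapped);
//
// Unlike Nan, no ToObject()/ToLocalChecked() dance is required; the IsObject()
// check above already makes the As<Napi::Object>() cast safe.
// ---------------------------------------------------------------------------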
connection = ObjectWrap::Unwrap<Connection>(
-      info[2]->ToObject(Nan::GetCurrentContext()).ToLocalChecked());
+      info[2].As<Napi::Object>());
    client = new AdminClient(connection);
  } else {
-    if (!info[0]->IsObject()) {
-      return Nan::ThrowError("Global configuration data must be specified");
+    if (!info[0].IsObject()) {
+      Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException();
+      return;
    }

    std::string errstr;

    gconfig = Conf::create(
        RdKafka::Conf::CONF_GLOBAL,
-        (info[0]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr);
+        info[0].As<Napi::Object>(), errstr);

    if (!gconfig) {
-      return Nan::ThrowError(errstr.c_str());
+      Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException();
+      return;
    }

    client = new AdminClient(gconfig);
  }
@@ -186,18 +192,19 @@ void AdminClient::New(const Nan::FunctionCallbackInfo<v8::Value>& info) {
  // basically it sets the configuration data
  // we don't need to do that because we lazy load it

-  info.GetReturnValue().Set(info.This());
+  // N-API construct calls yield `this` implicitly; nothing to return here.
}

-v8::Local<v8::Object> AdminClient::NewInstance(v8::Local<v8::Value> arg) {
-  Nan::EscapableHandleScope scope;
+Napi::Object AdminClient::NewInstance(Napi::Value arg) {
+  Napi::Env env = arg.Env();
+  Napi::EscapableHandleScope scope(env);

  const unsigned argc = 1;

-  v8::Local<v8::Value> argv[argc] = { arg };
-  v8::Local<v8::Function> cons = Nan::New(constructor);
-  v8::Local<v8::Object> instance =
-    Nan::NewInstance(cons, argc, argv).ToLocalChecked();
+  Napi::Value argv[argc] = { arg };
+  Napi::Function cons = constructor.Value();
+  Napi::Object instance = cons.New({ argv[0] });

-  return scope.Escape(instance);
+  return scope.Escape(instance).As<Napi::Object>();
}

@@ -989,8 +996,8 @@ void AdminClient::DeactivateDispatchers() {
 * C++ Exported prototype functions
 */

-NAN_METHOD(AdminClient::NodeConnect) {
-  Nan::HandleScope scope;
+Napi::Value AdminClient::NodeConnect(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

  AdminClient* client = ObjectWrap::Unwrap<AdminClient>(info.This());

@@ -1005,133 +1012,139 @@
  Baton b = client->Connect();
  // Let the JS library throw if we need to so the error can be more rich
  int error_code = static_cast<int>(b.err());
-  return info.GetReturnValue().Set(Nan::New(error_code));
+  return Napi::Number::New(env, error_code);
}

-NAN_METHOD(AdminClient::NodeDisconnect) {
-  Nan::HandleScope scope;
+Napi::Value AdminClient::NodeDisconnect(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

  AdminClient* client = ObjectWrap::Unwrap<AdminClient>(info.This());

  Baton b = client->Disconnect();
  // Let the JS library throw if we need to so the error can be more rich
  int error_code = static_cast<int>(b.err());
-  return info.GetReturnValue().Set(Nan::New(error_code));
+  return Napi::Number::New(env, error_code);
}

/**
 * Create topic
 */
-NAN_METHOD(AdminClient::NodeCreateTopic) {
-  Nan::HandleScope scope;
+Napi::Value AdminClient::NodeCreateTopic(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 3 || !info[2]->IsFunction()) {
+  if (info.Length() < 3 || !info[2].IsFunction()) {
    // Just throw an exception
-    return Nan::ThrowError("Need to specify a callback");
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
  }

-  if (!info[1]->IsNumber()) {
-    return Nan::ThrowError("Must provide 'timeout'");
+  if (!info[1].IsNumber()) {
+    Napi::Error::New(env, "Must provide 'timeout'").ThrowAsJavaScriptException();
+    return env.Null();
  }

  // Create the final callback object
-  v8::Local<v8::Function> cb = info[2].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[2].As<Napi::Function>();
+  Napi::FunctionReference *callback =
+      new Napi::FunctionReference(Napi::Persistent(cb));

  AdminClient* client = ObjectWrap::Unwrap<AdminClient>(info.This());

  // Get the timeout
-  int timeout = Nan::To<int32_t>(info[1]).FromJust();
+  int timeout = info[1].As<Napi::Number>().Int32Value();

  std::string errstr;
  // Get that topic we want to create
  rd_kafka_NewTopic_t* topic = Conversion::Admin::FromV8TopicObject(
-    info[0].As<v8::Object>(), errstr);
+    info[0].As<Napi::Object>(), errstr);

  if (topic == NULL) {
-    Nan::ThrowError(errstr.c_str());
-    return;
+    Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException();
+    return env.Null();
  }

  // Queue up dat work
-  Nan::AsyncQueueWorker(
-      new Workers::AdminClientCreateTopic(callback, client, topic, timeout));
+  (new Workers::AdminClientCreateTopic(callback, client, topic, timeout))
+      ->Queue();

-  return info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
}

/**
 * Delete topic
 */
-NAN_METHOD(AdminClient::NodeDeleteTopic) {
-  Nan::HandleScope scope;
+Napi::Value AdminClient::NodeDeleteTopic(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 3 || !info[2]->IsFunction()) {
+  if (info.Length() < 3 || !info[2].IsFunction()) {
    // Just throw an exception
-    return Nan::ThrowError("Need to specify a callback");
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
  }

-  if (!info[1]->IsNumber() || !info[0]->IsString()) {
-    return Nan::ThrowError("Must provide 'timeout', and 'topicName'");
+  if (!info[1].IsNumber() || !info[0].IsString()) {
+    Napi::Error::New(env, "Must provide 'timeout', and 'topicName'").ThrowAsJavaScriptException();
+    return env.Null();
  }

  // Create the final callback object
-  v8::Local<v8::Function> cb = info[2].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[2].As<Napi::Function>();
+  Napi::FunctionReference *callback =
+      new Napi::FunctionReference(Napi::Persistent(cb));

  AdminClient* client = ObjectWrap::Unwrap<AdminClient>(info.This());

  // Get the topic name from the string
  std::string topic_name = Util::FromV8String(
-    Nan::To<v8::String>(info[0]).ToLocalChecked());
+    info[0].ToString());

  // Get the timeout
-  int timeout = Nan::To<int32_t>(info[1]).FromJust();
+  int timeout = info[1].As<Napi::Number>().Int32Value();

  // Get that topic we want to create
  rd_kafka_DeleteTopic_t* topic = rd_kafka_DeleteTopic_new(
    topic_name.c_str());

  // Queue up dat work
-  Nan::AsyncQueueWorker(
-      new Workers::AdminClientDeleteTopic(callback, client, topic, timeout));
+  (new Workers::AdminClientDeleteTopic(callback, client, topic, timeout))
+      ->Queue();

-  return info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
}

/**
 * Delete topic
 */
-NAN_METHOD(AdminClient::NodeCreatePartitions) {
-  Nan::HandleScope scope;
+Napi::Value AdminClient::NodeCreatePartitions(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

  if (info.Length() < 4) {
    // Just throw an exception
-    return Nan::ThrowError("Need to specify a callback");
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
  }

-  if (!info[3]->IsFunction()) {
+  if (!info[3].IsFunction()) {
    // Just throw an exception
-    return Nan::ThrowError("Need to specify a callback 2");
+    Napi::Error::New(env, "Need to specify a callback 2").ThrowAsJavaScriptException();
+    return env.Null();
  }

-  if (!info[2]->IsNumber() || !info[1]->IsNumber() || !info[0]->IsString()) {
-    return Nan::ThrowError(
-        "Must provide 'totalPartitions', 'timeout', and 'topicName'");
+  if (!info[2].IsNumber() || !info[1].IsNumber() || !info[0].IsString()) {
+    Napi::Error::New(env,
+        "Must provide 'totalPartitions', 'timeout', and 'topicName'")
+        .ThrowAsJavaScriptException();
+    return env.Null();
  }

  // Create the final callback object
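// ---------------------------------------------------------------------------
// Editor's note: Napi::FunctionReference has no constructor taking a
// Napi::Function directly, so the script's `new Napi::FunctionReference(cb)`
// rewrite does not compile. The working idiom, used in the hunks fixed above
// and still needed in the remaining ones, is a sketch like:
//
//   Napi::Function cb = info[2].As<Napi::Function>();
//   Napi::FunctionReference* callback =
//       new Napi::FunctionReference(Napi::Persistent(cb));
//
// Napi::Persistent() returns a FunctionReference holding a strong reference,
// and the move constructor transfers it into the heap allocation that the
// worker classes take ownership of.
// ---------------------------------------------------------------------------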
- v8::Local cb = info[3].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[3].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(cb); AdminClient* client = ObjectWrap::Unwrap(info.This()); // Get the timeout - int timeout = Nan::To(info[2]).FromJust(); + int timeout = info[2].As().Int32Value(); // Get the total number of desired partitions - int partition_total_count = Nan::To(info[1]).FromJust(); + int partition_total_count = info[1].As().Int32Value(); // Get the topic name from the string std::string topic_name = Util::FromV8String( - Nan::To(info[0]).ToLocalChecked()); + info[0].To()); // Create an error buffer we can throw char* errbuf = reinterpret_cast(malloc(100)); @@ -1143,36 +1156,39 @@ NAN_METHOD(AdminClient::NodeCreatePartitions) { // If we got a failure on the create new partitions request, // fail here if (new_partitions == NULL) { - return Nan::ThrowError(errbuf); + Napi::Error::New(env, errbuf).ThrowAsJavaScriptException(); + return env.Null(); } // Queue up dat work - Nan::AsyncQueueWorker(new Workers::AdminClientCreatePartitions( + Napi::AsyncQueueWorker(new Workers::AdminClientCreatePartitions( callback, client, new_partitions, timeout)); - return info.GetReturnValue().Set(Nan::Null()); + return return env.Null(); } /** * List Consumer Groups. */ -NAN_METHOD(AdminClient::NodeListGroups) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeListGroups(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); - if (info.Length() < 2 || !info[1]->IsFunction()) { + if (info.Length() < 2 || !info[1].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsObject()) { - return Nan::ThrowError("Must provide options object"); + if (!info[0].IsObject()) { + Napi::Error::New(env, "Must provide options object").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local config = info[0].As(); + Napi::Object config = info[0].As(); // Create the final callback object - v8::Local cb = info[1].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[1].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(cb); AdminClient *client = ObjectWrap::Unwrap(info.This()); // Get the timeout - default 5000. @@ -1180,14 +1196,14 @@ NAN_METHOD(AdminClient::NodeListGroups) { // Get the match states, or not if they are unset. std::vector match_states; - v8::Local match_consumer_group_states_key = - Nan::New("matchConsumerGroupStates").ToLocalChecked(); + Napi::String match_consumer_group_states_key = + Napi::String::New(env, "matchConsumerGroupStates"); bool is_match_states_set = - Nan::Has(config, match_consumer_group_states_key).FromMaybe(false); - v8::Local match_states_array = Nan::New(); + (config).Has(match_consumer_group_states_key).FromMaybe(false); + Napi::Array match_states_array = Napi::Array::New(env); if (is_match_states_set) { - match_states_array = GetParameter>( + match_states_array = GetParameter( config, "matchConsumerGroupStates", match_states_array); if (match_states_array->Length()) { match_states = Conversion::Admin::FromV8GroupStateArray( @@ -1196,38 +1212,42 @@ NAN_METHOD(AdminClient::NodeListGroups) { } // Queue the work. 
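// ---------------------------------------------------------------------------
// Editor's note: node-addon-api has no Napi::AsyncQueueWorker; the script has
// only renamed the Nan call, so rewrites like the one below will not compile.
// Once the Workers classes derive from Napi::AsyncWorker (an assumption about
// a later commit in this series), the idiom is to heap-allocate the worker
// and call Queue() on it:
//
//   (new Workers::AdminClientListGroups(
//        callback, client, is_match_states_set, match_states, timeout_ms))
//       ->Queue();
//
// Execute() then runs on a worker thread and OnOK()/OnError() fire back on
// the main thread, mirroring Nan::AsyncWorker's HandleOKCallback.
// ---------------------------------------------------------------------------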
- Nan::AsyncQueueWorker(new Workers::AdminClientListGroups( + Napi::AsyncQueueWorker(new Workers::AdminClientListGroups( callback, client, is_match_states_set, match_states, timeout_ms)); } /** * Describe Consumer Groups. */ -NAN_METHOD(AdminClient::NodeDescribeGroups) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeDescribeGroups(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { + if (info.Length() < 3 || !info[2].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Must provide group name array"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Must provide group name array").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[1]->IsObject()) { - return Nan::ThrowError("Must provide options object"); + if (!info[1].IsObject()) { + Napi::Error::New(env, "Must provide options object").ThrowAsJavaScriptException(); + return env.Null(); } // Get list of group names to describe. - v8::Local group_names = info[0].As(); + Napi::Array group_names = info[0].As(); if (group_names->Length() == 0) { - return Nan::ThrowError("Must provide at least one group name"); + Napi::Error::New(env, "Must provide at least one group name").ThrowAsJavaScriptException(); + return env.Null(); } std::vector group_names_vector = v8ArrayToStringVector(group_names); - v8::Local config = info[1].As(); + Napi::Object config = info[1].As(); // Get the timeout - default 5000. int timeout_ms = GetParameter(config, "timeout", 5000); @@ -1237,12 +1257,12 @@ NAN_METHOD(AdminClient::NodeDescribeGroups) { GetParameter(config, "includeAuthorizedOperations", false); // Create the final callback object - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(cb); AdminClient *client = ObjectWrap::Unwrap(info.This()); // Queue the work. - Nan::AsyncQueueWorker(new Workers::AdminClientDescribeGroups( + Napi::AsyncQueueWorker(new Workers::AdminClientDescribeGroups( callback, client, group_names_vector, include_authorized_operations, timeout_ms)); } @@ -1250,27 +1270,31 @@ NAN_METHOD(AdminClient::NodeDescribeGroups) { /** * Delete Consumer Groups. */ -NAN_METHOD(AdminClient::NodeDeleteGroups) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeDeleteGroups(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { + if (info.Length() < 3 || !info[2].IsFunction()) { // Just throw an exception - return Nan::ThrowError("Need to specify a callback"); + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Must provide group name array"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Must provide group name array").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[1]->IsObject()) { - return Nan::ThrowError("Must provide options object"); + if (!info[1].IsObject()) { + Napi::Error::New(env, "Must provide options object").ThrowAsJavaScriptException(); + return env.Null(); } // Get list of group names to delete, and convert it into an // rd_kafka_DeleteGroup_t array. 
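// ---------------------------------------------------------------------------
// Editor's note: a sketch of how the group-name array conversion below reads
// in plain node-addon-api terms (names taken from the hunk; the free-standing
// form is illustrative):
//
//   Napi::Array group_names = info[0].As<Napi::Array>();
//   std::vector<std::string> group_names_vector;
//   for (uint32_t i = 0; i < group_names.Length(); i++) {
//     Napi::Value v = group_names.Get(i);
//     if (v.IsString()) {
//       group_names_vector.push_back(v.As<Napi::String>().Utf8Value());
//     }
//   }
//
// Napi::Array exposes Length()/Get() as methods, so the v8ArrayToStringVector
// helper ported in common.cc reduces to exactly this loop.
// ---------------------------------------------------------------------------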
- v8::Local group_names = info[0].As(); + Napi::Array group_names = info[0].As(); if (group_names->Length() == 0) { - return Nan::ThrowError("Must provide at least one group name"); + Napi::Error::New(env, "Must provide at least one group name").ThrowAsJavaScriptException(); + return env.Null(); } std::vector group_names_vector = v8ArrayToStringVector(group_names); @@ -1282,39 +1306,42 @@ NAN_METHOD(AdminClient::NodeDeleteGroups) { group_list[i] = rd_kafka_DeleteGroup_new(group_names_vector[i].c_str()); } - v8::Local config = info[1].As(); + Napi::Object config = info[1].As(); // Get the timeout - default 5000. int timeout_ms = GetParameter(config, "timeout", 5000); // Create the final callback object - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(cb); AdminClient *client = ObjectWrap::Unwrap(info.This()); // Queue the work. - Nan::AsyncQueueWorker(new Workers::AdminClientDeleteGroups( + Napi::AsyncQueueWorker(new Workers::AdminClientDeleteGroups( callback, client, group_list, group_names_vector.size(), timeout_ms)); } /** * List Consumer Group Offsets. */ -NAN_METHOD(AdminClient::NodeListConsumerGroupOffsets) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeListConsumerGroupOffsets(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (info.Length() < 3 || !info[2].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Must provide an array of 'listGroupOffsets'"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Must provide an array of 'listGroupOffsets'").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local listGroupOffsets = info[0].As(); + Napi::Array listGroupOffsets = info[0].As(); if (listGroupOffsets->Length() == 0) { - return Nan::ThrowError("'listGroupOffsets' cannot be empty"); + Napi::Error::New(env, "'listGroupOffsets' cannot be empty").ThrowAsJavaScriptException(); + return env.Null(); } /** @@ -1328,41 +1355,44 @@ NAN_METHOD(AdminClient::NodeListConsumerGroupOffsets) { listGroupOffsets->Length())); for (uint32_t i = 0; i < listGroupOffsets->Length(); ++i) { - v8::Local listGroupOffsetValue = - Nan::Get(listGroupOffsets, i).ToLocalChecked(); - if (!listGroupOffsetValue->IsObject()) { - return Nan::ThrowError("Each entry must be an object"); + Napi::Value listGroupOffsetValue = + (listGroupOffsets).Get(i); + if (!listGroupOffsetValue.IsObject()) { + Napi::Error::New(env, "Each entry must be an object").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local listGroupOffsetObj = - listGroupOffsetValue.As(); + Napi::Object listGroupOffsetObj = + listGroupOffsetValue.As(); - v8::Local groupIdValue; - if (!Nan::Get(listGroupOffsetObj, Nan::New("groupId").ToLocalChecked()) + Napi::Value groupIdValue; + if (!(listGroupOffsetObj).Get(Napi::String::New(env, "groupId")) .ToLocal(&groupIdValue)) { - return Nan::ThrowError("Each entry must have 'groupId'"); + Napi::Error::New(env, "Each entry must have 'groupId'").ThrowAsJavaScriptException(); + return env.Null(); } - Nan::MaybeLocal groupIdMaybe = - Nan::To(groupIdValue); + Napi::MaybeLocal groupIdMaybe = + groupIdValue.To(); if (groupIdMaybe.IsEmpty()) { - return Nan::ThrowError("'groupId' must be a string"); + 
Napi::Error::New(env, "'groupId' must be a string").ThrowAsJavaScriptException();
+      return env.Null();
    }

-    Nan::Utf8String groupIdUtf8(groupIdMaybe.ToLocalChecked());
-    std::string groupIdStr = *groupIdUtf8;
+    std::string groupIdStr = groupIdValue.As<Napi::String>().Utf8Value();

-    v8::Local<v8::Value> partitionsValue;
+    Napi::Value partitionsValue;
    rd_kafka_topic_partition_list_t *partitions = NULL;

-    if (Nan::Get(listGroupOffsetObj, Nan::New("partitions").ToLocalChecked())
-            .ToLocal(&partitionsValue) &&
-        partitionsValue->IsArray()) {
-      v8::Local<v8::Array> partitionsArray = partitionsValue.As<v8::Array>();
-      if (partitionsArray->Length() > 0) {
+    partitionsValue = listGroupOffsetObj.Get(Napi::String::New(env, "partitions"));
+    if (partitionsValue.IsArray()) {
+      Napi::Array partitionsArray = partitionsValue.As<Napi::Array>();
+      if (partitionsArray.Length() > 0) {
        partitions = Conversion::TopicPartition::
            TopicPartitionv8ArrayToTopicPartitionList(partitionsArray, false);
        if (partitions == NULL) {
-          return Nan::ThrowError(
-              "Failed to convert partitions to list, provide proper object in "
-              "partitions");
+          Napi::Error::New(env,
+              "Failed to convert partitions to list, provide proper object in "
+              "partitions").ThrowAsJavaScriptException();
+          return env.Null();
        }
@@ -1378,19 +1408,19 @@
  }

  // Now process the second argument: options (timeout and requireStableOffsets)
-  v8::Local<v8::Object> options = info[1].As<v8::Object>();
+  Napi::Object options = info[1].As<Napi::Object>();
  bool require_stable_offsets =
      GetParameter<bool>(options, "requireStableOffsets", false);
  int timeout_ms = GetParameter<int>(options, "timeout", 5000);

  // Create the final callback object
-  v8::Local<v8::Function> cb = info[2].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[2].As<Napi::Function>();
+  Napi::FunctionReference *callback =
+      new Napi::FunctionReference(Napi::Persistent(cb));
  AdminClient *client = ObjectWrap::Unwrap<AdminClient>(info.This());

  // Queue the worker to process the offset fetch request asynchronously
-  Nan::AsyncQueueWorker(new Workers::AdminClientListConsumerGroupOffsets(
-      callback, client, requests, listGroupOffsets->Length(),
-      require_stable_offsets, timeout_ms));
+  (new Workers::AdminClientListConsumerGroupOffsets(
+      callback, client, requests, listGroupOffsets.Length(),
+      require_stable_offsets, timeout_ms))->Queue();
}

@@ -1398,28 +1428,31 @@
/**
 * Delete Records.
*/ -NAN_METHOD(AdminClient::NodeDeleteRecords) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeDeleteRecords(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (info.Length() < 3 || !info[2].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError( + if (!info[0].IsArray()) { + return Napi::ThrowError( "Must provide array containg 'TopicPartitionOffset' objects"); } - if (!info[1]->IsObject()) { - return Nan::ThrowError("Must provide 'options' object"); + if (!info[1].IsObject()) { + Napi::Error::New(env, "Must provide 'options' object").ThrowAsJavaScriptException(); + return env.Null(); } // Get list of TopicPartitions to delete records from // and convert it into rd_kafka_DeleteRecords_t array - v8::Local delete_records_list = info[0].As(); + Napi::Array delete_records_list = info[0].As(); if (delete_records_list->Length() == 0) { - return Nan::ThrowError("Must provide at least one TopicPartitionOffset"); + Napi::Error::New(env, "Must provide at least one TopicPartitionOffset").ThrowAsJavaScriptException(); + return env.Null(); } /** @@ -1435,7 +1468,7 @@ NAN_METHOD(AdminClient::NodeDeleteRecords) { Conversion::TopicPartition::TopicPartitionv8ArrayToTopicPartitionList( delete_records_list, true); if (partitions == NULL) { - return Nan::ThrowError( + return Napi::ThrowError( "Failed to convert objects in delete records list, provide proper " "TopicPartitionOffset objects"); } @@ -1444,40 +1477,43 @@ NAN_METHOD(AdminClient::NodeDeleteRecords) { rd_kafka_topic_partition_list_destroy(partitions); // Now process the second argument: options (timeout and operation_timeout) - v8::Local options = info[1].As(); + Napi::Object options = info[1].As(); int operation_timeout_ms = GetParameter(options, "operation_timeout", 60000); int timeout_ms = GetParameter(options, "timeout", 5000); // Create the final callback object - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(cb); AdminClient *client = ObjectWrap::Unwrap(info.This()); // Queue the worker to process the offset fetch request asynchronously - Nan::AsyncQueueWorker(new Workers::AdminClientDeleteRecords( + Napi::AsyncQueueWorker(new Workers::AdminClientDeleteRecords( callback, client, delete_records, 1, operation_timeout_ms, timeout_ms)); } /** * Describe Topics. 
*/ -NAN_METHOD(AdminClient::NodeDescribeTopics) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeDescribeTopics(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (info.Length() < 3 || !info[2].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Must provide an array of 'topicNames'"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Must provide an array of 'topicNames'").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local topicNames = info[0].As(); + Napi::Array topicNames = info[0].As(); if (topicNames->Length() == 0) { - return Nan::ThrowError("'topicNames' cannot be empty"); + Napi::Error::New(env, "'topicNames' cannot be empty").ThrowAsJavaScriptException(); + return env.Null(); } std::vector topicNamesVector = v8ArrayToStringVector(topicNames); @@ -1499,18 +1535,18 @@ NAN_METHOD(AdminClient::NodeDescribeTopics) { free(topics); - v8::Local options = info[1].As(); + Napi::Object options = info[1].As(); bool include_authorised_operations = GetParameter(options, "includeAuthorizedOperations", false); int timeout_ms = GetParameter(options, "timeout", 5000); - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(cb); AdminClient *client = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker(new Workers::AdminClientDescribeTopics( + Napi::AsyncQueueWorker(new Workers::AdminClientDescribeTopics( callback, client, topic_collection, include_authorised_operations, timeout_ms)); } @@ -1519,18 +1555,20 @@ NAN_METHOD(AdminClient::NodeDescribeTopics) { /** * List Offsets. 
*/ -NAN_METHOD(AdminClient::NodeListOffsets) { - Nan::HandleScope scope; +Napi::Value AdminClient::NodeListOffsets(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); - if (info.Length() < 3 || !info[2]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (info.Length() < 3 || !info[2].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Must provide an array of 'TopicPartitionOffsets'"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Must provide an array of 'TopicPartitionOffsets'").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local listOffsets = info[0].As(); + Napi::Array listOffsets = info[0].As(); /** * The ownership of this is taken by @@ -1541,7 +1579,7 @@ NAN_METHOD(AdminClient::NodeListOffsets) { TopicPartitionOffsetSpecv8ArrayToTopicPartitionList(listOffsets); // Now process the second argument: options (timeout and isolationLevel) - v8::Local options = info[1].As(); + Napi::Object options = info[1].As(); rd_kafka_IsolationLevel_t isolation_level = static_cast(GetParameter( @@ -1551,12 +1589,12 @@ NAN_METHOD(AdminClient::NodeListOffsets) { int timeout_ms = GetParameter(options, "timeout", 5000); // Create the final callback object - v8::Local cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::Function cb = info[2].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(cb); AdminClient *client = ObjectWrap::Unwrap(info.This()); // Queue the worker to process the offset fetch request asynchronously - Nan::AsyncQueueWorker(new Workers::AdminClientListOffsets( + Napi::AsyncQueueWorker(new Workers::AdminClientListOffsets( callback, client, partitions, timeout_ms, isolation_level)); } diff --git a/src/admin.h b/src/admin.h index 9a269134..a4bba54e 100644 --- a/src/admin.h +++ b/src/admin.h @@ -11,7 +11,8 @@ #ifndef SRC_ADMIN_H_ #define SRC_ADMIN_H_ -#include +#include +#include #include #include #include @@ -38,8 +39,8 @@ namespace NodeKafka { class AdminClient : public Connection { public: - static void Init(v8::Local); - static v8::Local NewInstance(v8::Local); + static void Init(Napi::Object); + static Napi::Object NewInstance(Napi::Value); void ActivateDispatchers(); void DeactivateDispatchers(); @@ -76,8 +77,8 @@ class AdminClient : public Connection { rd_kafka_event_t** event_response); protected: - static Nan::Persistent constructor; - static void New(const Nan::FunctionCallbackInfo& info); + static Napi::FunctionReference constructor; + static void New(const Napi::CallbackInfo& info); explicit AdminClient(Conf* globalConfig); explicit AdminClient(Connection* existingConnection); @@ -87,22 +88,22 @@ class AdminClient : public Connection { private: // Node methods - // static NAN_METHOD(NodeValidateTopic); - static NAN_METHOD(NodeCreateTopic); - static NAN_METHOD(NodeDeleteTopic); - static NAN_METHOD(NodeCreatePartitions); + // static Napi::Value NodeValidateTopic(const Napi::CallbackInfo& info); + static Napi::Value NodeCreateTopic(const Napi::CallbackInfo& info); + static Napi::Value NodeDeleteTopic(const Napi::CallbackInfo& info); + static Napi::Value NodeCreatePartitions(const Napi::CallbackInfo& info); // Consumer group operations - static NAN_METHOD(NodeListGroups); - static NAN_METHOD(NodeDescribeGroups); - static NAN_METHOD(NodeDeleteGroups); - static NAN_METHOD(NodeListConsumerGroupOffsets); - static NAN_METHOD(NodeDeleteRecords); - static 
NAN_METHOD(NodeDescribeTopics); - static NAN_METHOD(NodeListOffsets); - - static NAN_METHOD(NodeConnect); - static NAN_METHOD(NodeDisconnect); + static Napi::Value NodeListGroups(const Napi::CallbackInfo& info); + static Napi::Value NodeDescribeGroups(const Napi::CallbackInfo& info); + static Napi::Value NodeDeleteGroups(const Napi::CallbackInfo& info); + static Napi::Value NodeListConsumerGroupOffsets(const Napi::CallbackInfo& info); + static Napi::Value NodeDeleteRecords(const Napi::CallbackInfo& info); + static Napi::Value NodeDescribeTopics(const Napi::CallbackInfo& info); + static Napi::Value NodeListOffsets(const Napi::CallbackInfo& info); + + static Napi::Value NodeConnect(const Napi::CallbackInfo& info); + static Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); }; } // namespace NodeKafka diff --git a/src/binding.cc b/src/binding.cc index 7b3fe77c..f93938f6 100644 --- a/src/binding.cc +++ b/src/binding.cc @@ -19,32 +19,32 @@ using NodeKafka::Topic; using RdKafka::ErrorCode; -NAN_METHOD(NodeRdKafkaErr2Str) { - int points = Nan::To(info[0]).FromJust(); +Napi::Value NodeRdKafkaErr2Str(const Napi::CallbackInfo& info) { + int points = info[0].As().Int32Value(); // Cast to error code RdKafka::ErrorCode err = static_cast(points); std::string errstr = RdKafka::err2str(err); - info.GetReturnValue().Set(Nan::New(errstr).ToLocalChecked()); + return Napi::String::New(env, errstr); } -NAN_METHOD(NodeRdKafkaBuildInFeatures) { +Napi::Value NodeRdKafkaBuildInFeatures(const Napi::CallbackInfo& info) { RdKafka::Conf * config = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); std::string features; if (RdKafka::Conf::CONF_OK == config->get("builtin.features", features)) { - info.GetReturnValue().Set(Nan::New(features).ToLocalChecked()); + return Napi::String::New(env, features); } else { - info.GetReturnValue().Set(Nan::Undefined()); + return env.Undefined(); } delete config; } -void ConstantsInit(v8::Local exports) { - v8::Local topicConstants = Nan::New(); +void ConstantsInit(Napi::Object exports) { + Napi::Object topicConstants = Napi::Object::New(env); // RdKafka Error Code definitions NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::PARTITION_UA); @@ -53,24 +53,24 @@ void ConstantsInit(v8::Local exports) { NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_STORED); NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_INVALID); - Nan::Set(exports, Nan::New("topic").ToLocalChecked(), topicConstants); + (exports).Set(Napi::String::New(env, "topic"), topicConstants); - Nan::Set(exports, Nan::New("err2str").ToLocalChecked(), - Nan::GetFunction(Nan::New(NodeRdKafkaErr2Str)).ToLocalChecked()); // NOLINT + (exports).Set(Napi::String::New(env, "err2str"), + Napi::GetFunction(Napi::Function::New(env, NodeRdKafkaErr2Str))); // NOLINT - Nan::Set(exports, Nan::New("features").ToLocalChecked(), - Nan::GetFunction(Nan::New(NodeRdKafkaBuildInFeatures)).ToLocalChecked()); // NOLINT + (exports).Set(Napi::String::New(env, "features"), + Napi::GetFunction(Napi::Function::New(env, NodeRdKafkaBuildInFeatures))); // NOLINT } -void Init(v8::Local exports, v8::Local m_, void* v_) { +void Init(Napi::Object exports, Napi::Value m_, void* v_) { KafkaConsumer::Init(exports); Producer::Init(exports); AdminClient::Init(exports); Topic::Init(exports); ConstantsInit(exports); - Nan::Set(exports, Nan::New("librdkafkaVersion").ToLocalChecked(), - Nan::New(RdKafka::version_str().c_str()).ToLocalChecked()); + (exports).Set(Napi::String::New(env, "librdkafkaVersion"), + Napi::New(env, 
RdKafka::version_str().c_str())); } -NODE_MODULE(kafka, Init) +NODE_API_MODULE(kafka, Init) diff --git a/src/binding.h b/src/binding.h index 0d656b10..b1a5a422 100644 --- a/src/binding.h +++ b/src/binding.h @@ -10,7 +10,8 @@ #ifndef SRC_BINDING_H_ #define SRC_BINDING_H_ -#include +#include +#include #include #include "rdkafkacpp.h" // NOLINT #include "src/common.h" diff --git a/src/callbacks.cc b/src/callbacks.cc index 9e7ce892..23f727a8 100644 --- a/src/callbacks.cc +++ b/src/callbacks.cc @@ -17,35 +17,34 @@ #include "src/kafka-consumer.h" -using v8::Local; -using v8::Value; -using v8::Object; -using v8::String; -using v8::Array; -using v8::Number; +using Napi::Value; +using Napi::Object; +using Napi::String; +using Napi::Array; +using Napi::Number; namespace NodeKafka { namespace Callbacks { -v8::Local TopicPartitionListToV8Array( +Napi::Array TopicPartitionListToV8Array( std::vector parts) { - v8::Local tp_array = Nan::New(); + Napi::Array tp_array = Napi::Array::New(env); for (size_t i = 0; i < parts.size(); i++) { - v8::Local tp_obj = Nan::New(); + Napi::Object tp_obj = Napi::Object::New(env); event_topic_partition_t tp = parts[i]; - Nan::Set(tp_obj, Nan::New("topic").ToLocalChecked(), - Nan::New(tp.topic.c_str()).ToLocalChecked()); - Nan::Set(tp_obj, Nan::New("partition").ToLocalChecked(), - Nan::New(tp.partition)); + (tp_obj).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, tp.topic.c_str())); + (tp_obj).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, tp.partition)); if (tp.offset >= 0) { - Nan::Set(tp_obj, Nan::New("offset").ToLocalChecked(), - Nan::New(tp.offset)); + (tp_obj).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, tp.offset)); } - Nan::Set(tp_array, i, tp_obj); + (tp_array).Set(i, tp_obj); } return tp_array; @@ -100,7 +99,7 @@ void Dispatcher::Execute() { } } -void Dispatcher::Dispatch(const int _argc, Local _argv[]) { +void Dispatcher::Dispatch(const int _argc, Napi::Value _argv[]) { // This should probably be an array of v8 values if (!HasCallbacks()) { return; @@ -111,15 +110,15 @@ void Dispatcher::Dispatch(const int _argc, Local _argv[]) { } } -void Dispatcher::AddCallback(const v8::Local &cb) { - Nan::Callback *value = new Nan::Callback(cb); +void Dispatcher::AddCallback(const Napi::Function &cb) { + Napi::FunctionReference *value = new Napi::FunctionReference(cb); callbacks.push_back(value); } -void Dispatcher::RemoveCallback(const v8::Local &cb) { +void Dispatcher::RemoveCallback(const Napi::Function &cb) { for (size_t i=0; i < callbacks.size(); i++) { if (callbacks[i]->GetFunction() == cb) { - Nan::Callback *found_callback = callbacks[i]; + Napi::FunctionReference *found_callback = callbacks[i]; callbacks.erase(callbacks.begin() + i); delete found_callback; break; @@ -187,7 +186,7 @@ void EventDispatcher::Add(const event_t &e) { } void EventDispatcher::Flush() { - Nan::HandleScope scope; + Napi::HandleScope scope(env); // Iterate through each of the currently stored events // generate a callback object for each, setting to the members // then @@ -202,56 +201,56 @@ void EventDispatcher::Flush() { } for (size_t i=0; i < _events.size(); i++) { - Local argv[argc] = {}; - Local jsobj = Nan::New(); + Napi::Value argv[argc] = {}; + Napi::Object jsobj = Napi::Object::New(env); switch (_events[i].type) { case RdKafka::Event::EVENT_ERROR: - argv[0] = Nan::New("error").ToLocalChecked(); - argv[1] = Nan::Error(_events[i].message.c_str()); + argv[0] = Napi::String::New(env, "error"); + argv[1] = Napi::Error::New(env, 
_events[i].message.c_str()); // if (event->err() == RdKafka::ERR__ALL_BROKERS_DOWN). Stop running // This may be better suited to the node side of things break; case RdKafka::Event::EVENT_STATS: - argv[0] = Nan::New("stats").ToLocalChecked(); + argv[0] = Napi::String::New(env, "stats"); - Nan::Set(jsobj, Nan::New("message").ToLocalChecked(), - Nan::New(_events[i].message.c_str()).ToLocalChecked()); + (jsobj).Set(Napi::String::New(env, "message"), + Napi::String::New(env, _events[i].message.c_str())); break; case RdKafka::Event::EVENT_LOG: - argv[0] = Nan::New("log").ToLocalChecked(); + argv[0] = Napi::String::New(env, "log"); - Nan::Set(jsobj, Nan::New("severity").ToLocalChecked(), - Nan::New(_events[i].severity)); - Nan::Set(jsobj, Nan::New("fac").ToLocalChecked(), - Nan::New(_events[i].fac.c_str()).ToLocalChecked()); - Nan::Set(jsobj, Nan::New("message").ToLocalChecked(), - Nan::New(_events[i].message.c_str()).ToLocalChecked()); - Nan::Set(jsobj, Nan::New("name").ToLocalChecked(), - Nan::New(this->client_name.c_str()).ToLocalChecked()); + (jsobj).Set(Napi::String::New(env, "severity"), + Napi::New(env, _events[i].severity)); + (jsobj).Set(Napi::String::New(env, "fac"), + Napi::New(env, _events[i].fac.c_str())); + (jsobj).Set(Napi::String::New(env, "message"), + Napi::New(env, _events[i].message.c_str())); + (jsobj).Set(Napi::String::New(env, "name"), + Napi::New(env, this->client_name.c_str())); break; case RdKafka::Event::EVENT_THROTTLE: - argv[0] = Nan::New("throttle").ToLocalChecked(); + argv[0] = Napi::String::New(env, "throttle"); - Nan::Set(jsobj, Nan::New("message").ToLocalChecked(), - Nan::New(_events[i].message.c_str()).ToLocalChecked()); + (jsobj).Set(Napi::String::New(env, "message"), + Napi::New(env, _events[i].message.c_str())); - Nan::Set(jsobj, Nan::New("throttleTime").ToLocalChecked(), - Nan::New(_events[i].throttle_time)); - Nan::Set(jsobj, Nan::New("brokerName").ToLocalChecked(), - Nan::New(_events[i].broker_name).ToLocalChecked()); - Nan::Set(jsobj, Nan::New("brokerId").ToLocalChecked(), - Nan::New(_events[i].broker_id)); + (jsobj).Set(Napi::String::New(env, "throttleTime"), + Napi::New(env, _events[i].throttle_time)); + (jsobj).Set(Napi::String::New(env, "brokerName"), + Napi::New(env, _events[i].broker_name)); + (jsobj).Set(Napi::String::New(env, "brokerId"), + Napi::Number::New(env, _events[i].broker_id)); break; default: - argv[0] = Nan::New("event").ToLocalChecked(); + argv[0] = Napi::String::New(env, "event"); - Nan::Set(jsobj, Nan::New("message").ToLocalChecked(), - Nan::New(events[i].message.c_str()).ToLocalChecked()); + (jsobj).Set(Napi::String::New(env, "message"), + Napi::New(env, events[i].message.c_str())); break; } @@ -279,7 +278,7 @@ size_t DeliveryReportDispatcher::Add(const DeliveryReport &e) { } void DeliveryReportDispatcher::Flush() { - Nan::HandleScope scope; + Napi::HandleScope scope(env); const unsigned int argc = 2; @@ -297,41 +296,41 @@ void DeliveryReportDispatcher::Flush() { } for (size_t i = 0; i < events_list.size(); i++) { - v8::Local argv[argc] = {}; + Napi::Value argv[argc] = {}; const DeliveryReport& event = events_list[i]; if (event.is_error) { // If it is an error we need the first argument to be set - argv[0] = Nan::New(event.error_code); + argv[0] = Napi::New(env, event.error_code); } else { - argv[0] = Nan::Null(); + argv[0] = env.Null(); } - Local jsobj(Nan::New()); + Napi::Object jsobj(Napi::Object::New(env)); - Nan::Set(jsobj, Nan::New("topic").ToLocalChecked(), - Nan::New(event.topic_name).ToLocalChecked()); - Nan::Set(jsobj, 
Nan::New("partition").ToLocalChecked(), - Nan::New(event.partition)); - Nan::Set(jsobj, Nan::New("offset").ToLocalChecked(), - Nan::New(event.offset)); + (jsobj).Set(Napi::String::New(env, "topic"), + Napi::New(env, event.topic_name)); + (jsobj).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, event.partition)); + (jsobj).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, event.offset)); if (event.key) { - Nan::MaybeLocal buff = Nan::NewBuffer( + Napi::MaybeLocal buff = Napi::Buffer::New(env, static_cast(event.key), static_cast(event.key_len)); - Nan::Set(jsobj, Nan::New("key").ToLocalChecked(), - buff.ToLocalChecked()); + (jsobj).Set(Napi::String::New(env, "key"), + buff); } else { - Nan::Set(jsobj, Nan::New("key").ToLocalChecked(), Nan::Null()); + (jsobj).Set(Napi::String::New(env, "key"), env.Null()); } if (event.opaque) { - Nan::Persistent * persistent = - static_cast *>(event.opaque); - v8::Local object = Nan::New(*persistent); - Nan::Set(jsobj, Nan::New("opaque").ToLocalChecked(), object); + Napi::Persistent * persistent = + static_cast *>(event.opaque); + Napi::Value object = Napi::New(env, *persistent); + (jsobj).Set(Napi::String::New(env, "opaque"), object); // Okay... now reset and destroy the persistent handle persistent->Reset(); @@ -341,26 +340,26 @@ void DeliveryReportDispatcher::Flush() { } if (event.timestamp > -1) { - Nan::Set(jsobj, Nan::New("timestamp").ToLocalChecked(), - Nan::New(event.timestamp)); + (jsobj).Set(Napi::String::New(env, "timestamp"), + Napi::Number::New(env, event.timestamp)); } if (event.m_include_payload) { if (event.payload) { - Nan::MaybeLocal buff = Nan::NewBuffer( + Napi::MaybeLocal buff = Napi::Buffer::New(env, static_cast(event.payload), static_cast(event.len)); - Nan::Set(jsobj, Nan::New("value").ToLocalChecked(), - buff.ToLocalChecked()); + (jsobj).Set(Napi::String::New(env, "value"), + buff); } else { - Nan::Set(jsobj, Nan::New("value").ToLocalChecked(), - Nan::Null()); + (jsobj).Set(Napi::String::New(env, "value"), + env.Null()); } } - Nan::Set(jsobj, Nan::New("size").ToLocalChecked(), - Nan::New(event.len)); + (jsobj).Set(Napi::String::New(env, "size"), + Napi::Number::New(env, event.len)); argv[1] = jsobj; @@ -418,7 +417,7 @@ DeliveryReport::DeliveryReport(RdKafka::Message &message, bool include_payload) len = message.len(); if (m_include_payload && message.payload()) { - // this pointer will be owned and freed by the Nan::NewBuffer + // this pointer will be owned and freed by the Napi::NewBuffer // created in DeliveryReportDispatcher::Flush() payload = malloc(len); memcpy(payload, message.payload(), len); @@ -464,7 +463,7 @@ void RebalanceDispatcher::Add(const rebalance_event_t &e) { } void RebalanceDispatcher::Flush() { - Nan::HandleScope scope; + Napi::HandleScope scope(env); // Iterate through each of the currently stored events // generate a callback object for each, setting to the members // then @@ -480,13 +479,13 @@ void RebalanceDispatcher::Flush() { } for (size_t i=0; i < events.size(); i++) { - v8::Local argv[argc] = {}; + Napi::Value argv[argc] = {}; if (events[i].err == RdKafka::ERR_NO_ERROR) { - argv[0] = Nan::Undefined(); + argv[0] = env.Undefined(); } else { // ERR__ASSIGN_PARTITIONS? Special case? 
Nah - argv[0] = Nan::New(events[i].err); + argv[0] = Napi::New(env, events[i].err); } std::vector parts = events[i].partitions; @@ -515,7 +514,7 @@ void OffsetCommitDispatcher::Add(const offset_commit_event_t &e) { } void OffsetCommitDispatcher::Flush() { - Nan::HandleScope scope; + Napi::HandleScope scope(env); // Iterate through each of the currently stored events // generate a callback object for each, setting to the members // then @@ -531,12 +530,12 @@ void OffsetCommitDispatcher::Flush() { } for (size_t i = 0; i < events.size(); i++) { - v8::Local argv[argc] = {}; + Napi::Value argv[argc] = {}; if (events[i].err == RdKafka::ERR_NO_ERROR) { - argv[0] = Nan::Undefined(); + argv[0] = env.Undefined(); } else { - argv[0] = Nan::New(events[i].err); + argv[0] = Napi::New(env, events[i].err); } // Now convert the TopicPartition list to a JS array @@ -560,7 +559,7 @@ void OAuthBearerTokenRefreshDispatcher::Add( } void OAuthBearerTokenRefreshDispatcher::Flush() { - Nan::HandleScope scope; + Napi::HandleScope scope(env); const unsigned int argc = 1; @@ -571,8 +570,8 @@ void OAuthBearerTokenRefreshDispatcher::Flush() { m_oauthbearer_config.clear(); } - v8::Local argv[argc] = {}; - argv[0] = Nan::New(oauthbearer_config.c_str()).ToLocalChecked(); + Napi::Value argv[argc] = {}; + argv[0] = Napi::String::New(env, oauthbearer_config.c_str()); Dispatch(argc, argv); } @@ -598,27 +597,27 @@ int32_t Partitioner::partitioner_cb(const RdKafka::Topic *topic, return random(topic, partition_cnt); } - Local argv[3] = {}; + Napi::Value argv[3] = {}; - argv[0] = Nan::New(topic->name().c_str()).ToLocalChecked(); + argv[0] = Napi::String::New(env, topic->name().c_str()); if (key->empty()) { - argv[1] = Nan::Null(); + argv[1] = env.Null(); } else { - argv[1] = Nan::New(key->c_str()).ToLocalChecked(); + argv[1] = Napi::String::New(env, key->c_str()); } - argv[2] = Nan::New(partition_cnt); + argv[2] = Napi::Int32::New(env, partition_cnt); - v8::Local return_value = callback.Call(3, argv); + Napi::Value return_value = callback.Call(3, argv); - Nan::Maybe partition_return = Nan::To(return_value); + Napi::Maybe partition_return = return_value.As().Int32Value(); int32_t chosen_partition; if (partition_return.IsNothing()) { chosen_partition = RdKafka::Topic::PARTITION_UA; } else { - chosen_partition = partition_return.FromJust(); + chosen_partition = partition_return; } if (!topic->partition_available(chosen_partition)) { @@ -645,7 +644,7 @@ unsigned int Partitioner::random(const RdKafka::Topic *topic, int32_t max) { } } -void Partitioner::SetCallback(v8::Local cb) { +void Partitioner::SetCallback(Napi::Function cb) { callback(cb); } @@ -653,7 +652,7 @@ QueueNotEmptyDispatcher::QueueNotEmptyDispatcher() {} QueueNotEmptyDispatcher::~QueueNotEmptyDispatcher() {} void QueueNotEmptyDispatcher::Flush() { - Nan::HandleScope scope; + Napi::HandleScope scope(env); const unsigned int argc = 0; Dispatch(argc, nullptr); diff --git a/src/callbacks.h b/src/callbacks.h index 7c0427c8..6dae54f5 100644 --- a/src/callbacks.h +++ b/src/callbacks.h @@ -11,7 +11,8 @@ #define SRC_CALLBACKS_H_ #include -#include +#include +#include #include #include @@ -29,9 +30,9 @@ class Dispatcher { public: Dispatcher(); ~Dispatcher(); - void Dispatch(const int, v8::Local []); - void AddCallback(const v8::Local&); - void RemoveCallback(const v8::Local&); + void Dispatch(const int, Napi::Value []); + void AddCallback(const Napi::Function&); + void RemoveCallback(const Napi::Function&); bool HasCallbacks(); virtual void Flush() = 0; void Execute(); @@ -39,12 
+40,12 @@ class Dispatcher { void Deactivate(); protected: - std::vector callbacks; // NOLINT + std::vector callbacks; // NOLINT uv_mutex_t async_lock; private: - NAN_INLINE static NAUV_WORK_CB(AsyncMessage_) { + inline static NAUV_WORK_CB(AsyncMessage_) { Dispatcher *dispatcher = static_cast(async->data); dispatcher->Flush(); @@ -268,8 +269,8 @@ class Partitioner : public RdKafka::PartitionerCb { Partitioner(); ~Partitioner(); int32_t partitioner_cb( const RdKafka::Topic*, const std::string*, int32_t, void*); // NOLINT - Nan::Callback callback; // NOLINT - void SetCallback(v8::Local); + Napi::FunctionReference callback; // NOLINT + void SetCallback(Napi::Function); private: static unsigned int djb_hash(const char*, size_t); static unsigned int random(const RdKafka::Topic*, int32_t); diff --git a/src/common.cc b/src/common.cc index e488e02e..f1f88b15 100644 --- a/src/common.cc +++ b/src/common.cc @@ -21,83 +21,83 @@ void Log(std::string str) { } template -T GetParameter(v8::Local object, std::string field_name, T def) { - v8::Local field = Nan::New(field_name.c_str()).ToLocalChecked(); - if (Nan::Has(object, field).FromMaybe(false)) { - Nan::Maybe maybeT = Nan::To(Nan::Get(object, field).ToLocalChecked()); +T GetParameter(Napi::Object object, std::string field_name, T def) { + Napi::String field = Napi::New(env, field_name.c_str()); + if ((object).Has(field).FromMaybe(false)) { + Napi::Maybe maybeT = Napi::To((object).Get(field)); if (maybeT.IsNothing()) { return def; } else { - return maybeT.FromJust(); + return maybeT; } } return def; } template<> -int64_t GetParameter(v8::Local object, +int64_t GetParameter(Napi::Object object, std::string field_name, int64_t def) { - v8::Local field = Nan::New(field_name.c_str()).ToLocalChecked(); - if (Nan::Has(object, field).FromMaybe(false)) { - v8::Local v = Nan::Get(object, field).ToLocalChecked(); + Napi::String field = Napi::New(env, field_name.c_str()); + if ((object).Has(field).FromMaybe(false)) { + Napi::Value v = (object).Get(field); - if (!v->IsNumber()) { + if (!v.IsNumber()) { return def; } - Nan::Maybe maybeInt = Nan::To(v); + Napi::Maybe maybeInt = v.As().Int64Value(); if (maybeInt.IsNothing()) { return def; } else { - return maybeInt.FromJust(); + return maybeInt; } } return def; } template<> -bool GetParameter(v8::Local object, +bool GetParameter(Napi::Object object, std::string field_name, bool def) { - v8::Local field = Nan::New(field_name.c_str()).ToLocalChecked(); - if (Nan::Has(object, field).FromMaybe(false)) { - v8::Local v = Nan::Get(object, field).ToLocalChecked(); + Napi::String field = Napi::New(env, field_name.c_str()); + if ((object).Has(field).FromMaybe(false)) { + Napi::Value v = (object).Get(field); if (!v->IsBoolean()) { return def; } - Nan::Maybe maybeInt = Nan::To(v); + Napi::Maybe maybeInt = v.As().Value(); if (maybeInt.IsNothing()) { return def; } else { - return maybeInt.FromJust(); + return maybeInt; } } return def; } template<> -int GetParameter(v8::Local object, +int GetParameter(Napi::Object object, std::string field_name, int def) { return static_cast(GetParameter(object, field_name, def)); } template<> -std::string GetParameter(v8::Local object, +std::string GetParameter(Napi::Object object, std::string field_name, std::string def) { - v8::Local field = Nan::New(field_name.c_str()).ToLocalChecked(); - if (Nan::Has(object, field).FromMaybe(false)) { - v8::Local parameter = - Nan::Get(object, field).ToLocalChecked(); - // Nan::To(); + Napi::String field = Napi::New(env, field_name.c_str()); + if 
(object.Has(field)) {
+    Napi::Value parameter = object.Get(field);

-    if (!parameter->IsUndefined() && !parameter->IsNull()) {
-      v8::Local<v8::String> val = Nan::To<v8::String>(parameter)
-        .ToLocalChecked();
+    if (!parameter.IsUndefined() && !parameter.IsNull()) {
+      Napi::String val = parameter.ToString();

-      if (!val->IsUndefined() && !val->IsNull()) {
-        Nan::Utf8String parameterValue(val);
-        std::string parameterString(*parameterValue);
+      if (!val.IsUndefined() && !val.IsNull()) {
+        std::string parameterString = val.Utf8Value();
        return parameterString;
@@ -109,34 +109,34 @@ std::string GetParameter(v8::Local<v8::Object> object,

template<>
std::vector<std::string> GetParameter<std::vector<std::string> >(
-    v8::Local<v8::Object> object, std::string field_name,
+    Napi::Object object, std::string field_name,
    std::vector<std::string> def) {
-  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
+  Napi::String field = Napi::String::New(object.Env(), field_name.c_str());

-  if (Nan::Has(object, field).FromMaybe(false)) {
-    v8::Local<v8::Value> maybeArray = Nan::Get(object, field).ToLocalChecked();
-    if (maybeArray->IsArray()) {
-      v8::Local<v8::Array> parameter = maybeArray.As<v8::Array>();
+  if (object.Has(field)) {
+    Napi::Value maybeArray = object.Get(field);
+    if (maybeArray.IsArray()) {
+      Napi::Array parameter = maybeArray.As<Napi::Array>();
      return v8ArrayToStringVector(parameter);
    }
  }
  return def;
}

-std::vector<std::string> v8ArrayToStringVector(v8::Local<v8::Array> parameter) {
+std::vector<std::string> v8ArrayToStringVector(Napi::Array parameter) {
  std::vector<std::string> newItem;
-  if (parameter->Length() >= 1) {
-    for (unsigned int i = 0; i < parameter->Length(); i++) {
-      v8::Local<v8::Value> v;
-      if (!Nan::Get(parameter, i).ToLocal(&v)) {
+  if (parameter.Length() >= 1) {
+    for (uint32_t i = 0; i < parameter.Length(); i++) {
+      Napi::Value v = parameter.Get(i);
+      if (v.IsEmpty()) {
        continue;
      }
-      Nan::MaybeLocal<v8::String> p = Nan::To<v8::String>(v);
+      Napi::String p = v.ToString();
      if (p.IsEmpty()) {
        continue;
      }
-      Nan::Utf8String pVal(p.ToLocalChecked());
-      std::string pString(*pVal);
+      std::string pString = p.Utf8Value();
      newItem.push_back(pString);
    }
@@ -144,19 +144,19 @@ std::vector<std::string> v8ArrayToStringVector(v8::Local<v8::Array> parameter) {

-std::list<std::string> v8ArrayToStringList(v8::Local<v8::Array> parameter) {
+std::list<std::string> v8ArrayToStringList(Napi::Array parameter) {
  std::list<std::string> newItem;
-  if (parameter->Length() >= 1) {
-    for (unsigned int i = 0; i < parameter->Length(); i++) {
-      v8::Local<v8::Value> v;
-      if (!Nan::Get(parameter, i).ToLocal(&v)) {
+  if (parameter.Length() >= 1) {
+    for (uint32_t i = 0; i < parameter.Length(); i++) {
+      Napi::Value v = parameter.Get(i);
+      if (v.IsEmpty()) {
        continue;
      }
-      Nan::MaybeLocal<v8::String> p = Nan::To<v8::String>(v);
+      Napi::String p = v.ToString();
      if (p.IsEmpty()) {
        continue;
      }
-      Nan::Utf8String pVal(p.ToLocalChecked());
-      std::string pString(*pVal);
+      std::string pString = p.Utf8Value();
      newItem.push_back(pString);
    }
@@ -164,16 +164,16 @@ std::list<std::string> v8ArrayToStringList(v8::Local<v8::Array> parameter) {

-template<> v8::Local<v8::Array> GetParameter<v8::Local<v8::Array> >(
-    v8::Local<v8::Object> object,
+template<> Napi::Array GetParameter<Napi::Array>(
+    Napi::Object object,
    std::string field_name,
-    v8::Local<v8::Array> def) {
-  v8::Local<v8::String> field = Nan::New(field_name.c_str()).ToLocalChecked();
+    Napi::Array def) {
+  Napi::String field = Napi::String::New(object.Env(), field_name.c_str());

-  if (Nan::Has(object, field).FromMaybe(false)) {
-    v8::Local<v8::Value> maybeArray = Nan::Get(object, field).ToLocalChecked();
-    if (maybeArray->IsArray()) {
-      v8::Local<v8::Array> parameter = maybeArray.As<v8::Array>();
+  if (object.Has(field)) {
+    Napi::Value maybeArray = object.Get(field);
+    if (maybeArray.IsArray()) {
+      Napi::Array parameter = maybeArray.As<Napi::Array>();
      return parameter;
    }
  }
@@ -184,29 +184,29 @@ template<> v8::Local<v8::Array> GetParameter<v8::Local<v8::Array> >(

namespace Conversion {
namespace Util {
-std::vector<std::string> ToStringVector(v8::Local<v8::Array> parameter) {
+std::vector<std::string> ToStringVector(Napi::Array parameter) {
  std::vector<std::string> newItem;

  if (parameter->Length() >= 1) {
    for (unsigned int i = 0; i < parameter->Length(); i++) {
-      v8::Local<v8::Value> element;
-
if (!Nan::Get(parameter, i).ToLocal(&element)) { + Napi::Value element; + if (!(parameter).Get(i).ToLocal(&element)) { continue; } if (!element->IsRegExp()) { - Nan::MaybeLocal p = Nan::To(element); + Napi::MaybeLocal p = element.To(); if (p.IsEmpty()) { continue; } - Nan::Utf8String pVal(p.ToLocalChecked()); + std::string pVal = p.ToLocalChecked(.As()); std::string pString(*pVal); newItem.push_back(pString); } else { - Nan::Utf8String pVal(element.As()->GetSource()); + std::string pVal = element.As(.As()->GetSource()); std::string pString(*pVal); Log(pString); @@ -219,12 +219,12 @@ std::vector ToStringVector(v8::Local parameter) { return newItem; } -v8::Local ToV8Array(std::vector parameter) { - v8::Local newItem = Nan::New(); +Napi::Array ToV8Array(std::vector parameter) { + Napi::Array newItem = Napi::Array::New(env); for (size_t i = 0; i < parameter.size(); i++) { std::string topic = parameter[i]; - Nan::Set(newItem, i, Nan::New(topic).ToLocalChecked()); + (newItem).Set(i, Napi::String::New(env, topic)); } return newItem; @@ -234,15 +234,15 @@ v8::Local ToV8Array(std::vector parameter) { * @brief Converts a list of rd_kafka_error_t* into a v8 array of RdKafkaError * objects. */ -v8::Local ToV8Array(const rd_kafka_error_t** error_list, +Napi::Array ToV8Array(const rd_kafka_error_t** error_list, size_t error_cnt) { - v8::Local errors = Nan::New(); + Napi::Array errors = Napi::Array::New(env); for (size_t i = 0; i < error_cnt; i++) { RdKafka::ErrorCode code = static_cast(rd_kafka_error_code(error_list[i])); std::string msg = std::string(rd_kafka_error_string(error_list[i])); - Nan::Set(errors, i, RdKafkaError(code, msg)); + (errors).Set(i, RdKafkaError(code, msg)); } return errors; @@ -251,7 +251,7 @@ v8::Local ToV8Array(const rd_kafka_error_t** error_list, /** * @brief Converts a rd_kafka_Node_t* into a v8 object. */ -v8::Local ToV8Object(const rd_kafka_Node_t* node) { +Napi::Object ToV8Object(const rd_kafka_Node_t* node) { /* Return object type { id: number @@ -260,19 +260,19 @@ v8::Local ToV8Object(const rd_kafka_Node_t* node) { rack?: string } */ - v8::Local obj = Nan::New(); + Napi::Object obj = Napi::Object::New(env); - Nan::Set(obj, Nan::New("id").ToLocalChecked(), - Nan::New(rd_kafka_Node_id(node))); - Nan::Set(obj, Nan::New("host").ToLocalChecked(), - Nan::New(rd_kafka_Node_host(node)).ToLocalChecked()); - Nan::Set(obj, Nan::New("port").ToLocalChecked(), - Nan::New(rd_kafka_Node_port(node))); + (obj).Set(Napi::String::New(env, "id"), + Napi::Number::New(env, rd_kafka_Node_id(node))); + (obj).Set(Napi::String::New(env, "host"), + Napi::String::New(env, rd_kafka_Node_host(node))); + (obj).Set(Napi::String::New(env, "port"), + Napi::Number::New(env, rd_kafka_Node_port(node))); const char* rack = rd_kafka_Node_rack(node); if (rack) { - Nan::Set(obj, Nan::New("rack").ToLocalChecked(), - Nan::New(rack).ToLocalChecked()); + (obj).Set(Napi::String::New(env, "rack"), + Napi::String::New(env, rack)); } return obj; @@ -281,7 +281,7 @@ v8::Local ToV8Object(const rd_kafka_Node_t* node) { /** * @brief Converts a rd_kafka_Uuid_t* into a v8 object. 
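 *
 * A rough node-addon-api sketch of the two BigInt fields below (assumptions:
 * `env` is the surrounding Napi::Env, which the machine migration leaves
 * undeclared, and the build targets N-API version 6+, where Napi::BigInt is
 * available); the migrated body still calls v8::BigInt directly:
 * @code
 *   obj.Set("mostSignificantBits",
 *       Napi::BigInt::New(env, rd_kafka_Uuid_most_significant_bits(uuid)));
 *   obj.Set("leastSignificantBits",
 *       Napi::BigInt::New(env, rd_kafka_Uuid_least_significant_bits(uuid)));
 * @endcode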
*/ -v8::Local UuidToV8Object(const rd_kafka_Uuid_t* uuid) { +Napi::Object UuidToV8Object(const rd_kafka_Uuid_t* uuid) { /*Return object type { mostSignificantBits: bigint @@ -289,17 +289,17 @@ v8::Local UuidToV8Object(const rd_kafka_Uuid_t* uuid) { base64: string } */ - v8::Local obj = Nan::New(); + Napi::Object obj = Napi::Object::New(env); - Nan::Set(obj, Nan::New("mostSignificantBits").ToLocalChecked(), + (obj).Set(Napi::String::New(env, "mostSignificantBits"), v8::BigInt::New(v8::Isolate::GetCurrent(), rd_kafka_Uuid_most_significant_bits(uuid))); - Nan::Set(obj, Nan::New("leastSignificantBits").ToLocalChecked(), + (obj).Set(Napi::String::New(env, "leastSignificantBits"), v8::BigInt::New(v8::Isolate::GetCurrent(), rd_kafka_Uuid_least_significant_bits(uuid))); - Nan::Set( - obj, Nan::New("base64").ToLocalChecked(), - Nan::New(rd_kafka_Uuid_base64str(uuid)).ToLocalChecked()); + ( + obj).Set(Napi::String::New(env, "base64"), + Napi::String::New(env, rd_kafka_Uuid_base64str(uuid))); return obj; } @@ -307,13 +307,13 @@ v8::Local UuidToV8Object(const rd_kafka_Uuid_t* uuid) { /** * @brief Converts a list of rd_kafka_AclOperation_t into a v8 array. */ -v8::Local ToV8Array( +Napi::Array ToV8Array( const rd_kafka_AclOperation_t* authorized_operations, size_t authorized_operations_cnt) { - v8::Local array = Nan::New(); + Napi::Array array = Napi::Array::New(env); for (size_t i = 0; i < authorized_operations_cnt; i++) { - Nan::Set(array, i, Nan::New(authorized_operations[i])); + (array).Set(i, Napi::Number::New(env, authorized_operations[i])); } return array; @@ -333,9 +333,9 @@ namespace TopicPartition { * use `ToTopicPartitionV8Array(const rd_kafka_topic_partition_list_t*, * bool)`. */ -v8::Local ToV8Array( +Napi::Array ToV8Array( std::vector & topic_partition_list) { // NOLINT - v8::Local array = Nan::New(); + Napi::Array array = Napi::Array::New(env); for (size_t topic_partition_i = 0; topic_partition_i < topic_partition_list.size(); topic_partition_i++) { RdKafka::TopicPartition* topic_partition = @@ -344,42 +344,42 @@ v8::Local ToV8Array( // TODO: why do we set the entire array element to be an error rather adding // an error field to TopicPartition? Or create a TopicPartitionError? if (topic_partition->err() != RdKafka::ErrorCode::ERR_NO_ERROR) { - Nan::Set(array, topic_partition_i, - Nan::Error(Nan::New(RdKafka::err2str(topic_partition->err())) - .ToLocalChecked())); + (array).Set(topic_partition_i, + Napi::Error::New(env, Napi::New(env, RdKafka::err2str(topic_partition->err())) + )); } else { // We have the list now let's get the properties from it - v8::Local obj = Nan::New(); + Napi::Object obj = Napi::Object::New(env); if (topic_partition->offset() != RdKafka::Topic::OFFSET_INVALID) { - Nan::Set(obj, Nan::New("offset").ToLocalChecked(), - Nan::New(topic_partition->offset())); + (obj).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, topic_partition->offset())); } // If present, size >= 1, since it will include at least the // null terminator. if (topic_partition->get_metadata().size() > 0) { - Nan::Set(obj, Nan::New("metadata").ToLocalChecked(), - Nan::New( + (obj).Set(Napi::String::New(env, "metadata"), + Napi::String::New(env, reinterpret_cast(topic_partition->get_metadata().data()), // NOLINT // null terminator is not required by the constructor. 
topic_partition->get_metadata().size() - 1) - .ToLocalChecked()); + ); } - Nan::Set(obj, Nan::New("partition").ToLocalChecked(), - Nan::New(topic_partition->partition())); - Nan::Set(obj, Nan::New("topic").ToLocalChecked(), - Nan::New(topic_partition->topic().c_str()) - .ToLocalChecked()); + (obj).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, topic_partition->partition())); + (obj).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, topic_partition->topic().c_str()) + ); int leader_epoch = topic_partition->get_leader_epoch(); if (leader_epoch >= 0) { - Nan::Set(obj, Nan::New("leaderEpoch").ToLocalChecked(), - Nan::New(leader_epoch)); + (obj).Set(Napi::String::New(env, "leaderEpoch"), + Napi::Number::New(env, leader_epoch)); } - Nan::Set(array, topic_partition_i, obj); + (array).Set(topic_partition_i, obj); } } @@ -398,41 +398,41 @@ v8::Local ToV8Array( * array elements, unlike the `ToV8Array(std::vector & * topic_partition_list)`. */ -v8::Local ToTopicPartitionV8Array( +Napi::Array ToTopicPartitionV8Array( const rd_kafka_topic_partition_list_t* topic_partition_list, bool include_offset) { - v8::Local array = Nan::New(); + Napi::Array array = Napi::Array::New(env); for (int topic_partition_i = 0; topic_partition_i < topic_partition_list->cnt; topic_partition_i++) { rd_kafka_topic_partition_t topic_partition = topic_partition_list->elems[topic_partition_i]; - v8::Local obj = Nan::New(); + Napi::Object obj = Napi::Object::New(env); - Nan::Set(obj, Nan::New("partition").ToLocalChecked(), - Nan::New(topic_partition.partition)); - Nan::Set(obj, Nan::New("topic").ToLocalChecked(), - Nan::New(topic_partition.topic).ToLocalChecked()); + (obj).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, topic_partition.partition)); + (obj).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, topic_partition.topic)); if (topic_partition.err != RD_KAFKA_RESP_ERR_NO_ERROR) { - v8::Local error = NodeKafka::RdKafkaError( + Napi::Object error = NodeKafka::RdKafkaError( static_cast(topic_partition.err)); - Nan::Set(obj, Nan::New("error").ToLocalChecked(), error); + (obj).Set(Napi::String::New(env, "error"), error); } if (include_offset) { - Nan::Set(obj, Nan::New("offset").ToLocalChecked(), - Nan::New(topic_partition.offset)); + (obj).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, topic_partition.offset)); } int leader_epoch = rd_kafka_topic_partition_get_leader_epoch(&topic_partition); if (leader_epoch >= 0) { - Nan::Set(obj, Nan::New("leaderEpoch").ToLocalChecked(), - Nan::New(leader_epoch)); + (obj).Set(Napi::String::New(env, "leaderEpoch"), + Napi::Number::New(env, leader_epoch)); } - Nan::Set(array, topic_partition_i, obj); + (array).Set(topic_partition_i, obj); } return array; } @@ -445,21 +445,21 @@ v8::Local ToTopicPartitionV8Array( * @note You must delete all the pointers inside here when you are done!! */ std::vector FromV8Array( - const v8::Local & topic_partition_list) { + const Napi::Array & topic_partition_list) { // NOTE: ARRAY OF POINTERS! 
DELETE THEM WHEN YOU ARE FINISHED std::vector array; for (size_t topic_partition_i = 0; topic_partition_i < topic_partition_list->Length(); topic_partition_i++) { - v8::Local topic_partition_value; - if (!Nan::Get(topic_partition_list, topic_partition_i) + Napi::Value topic_partition_value; + if (!(topic_partition_list).Get(topic_partition_i) .ToLocal(&topic_partition_value)) { continue; } - if (topic_partition_value->IsObject()) { + if (topic_partition_value.IsObject()) { array.push_back(FromV8Object( - Nan::To(topic_partition_value).ToLocalChecked())); + topic_partition_value.To())); } } @@ -473,21 +473,21 @@ std::vector FromV8Array( * offset?: number}] to a rd_kafka_topic_partition_list_t */ rd_kafka_topic_partition_list_t* TopicPartitionv8ArrayToTopicPartitionList( - v8::Local parameter, bool include_offset) { + Napi::Array parameter, bool include_offset) { rd_kafka_topic_partition_list_t* newList = rd_kafka_topic_partition_list_new(parameter->Length()); for (unsigned int i = 0; i < parameter->Length(); i++) { - v8::Local v; - if (!Nan::Get(parameter, i).ToLocal(&v)) { + Napi::Value v; + if (!(parameter).Get(i).ToLocal(&v)) { continue; } - if (!v->IsObject()) { + if (!v.IsObject()) { return NULL; // Return NULL to indicate an error } - v8::Local item = v.As(); + Napi::Object item = v.As(); std::string topic = GetParameter(item, "topic", ""); int partition = GetParameter(item, "partition", -1); @@ -512,21 +512,21 @@ rd_kafka_topic_partition_list_t* TopicPartitionv8ArrayToTopicPartitionList( */ rd_kafka_topic_partition_list_t* TopicPartitionOffsetSpecv8ArrayToTopicPartitionList( - v8::Local parameter) { + Napi::Array parameter) { rd_kafka_topic_partition_list_t* newList = rd_kafka_topic_partition_list_new(parameter->Length()); for (unsigned int i = 0; i < parameter->Length(); i++) { - v8::Local v; - if (!Nan::Get(parameter, i).ToLocal(&v)) { + Napi::Value v; + if (!(parameter).Get(i).ToLocal(&v)) { continue; } - if (!v->IsObject()) { + if (!v.IsObject()) { return NULL; // Return NULL to indicate an error } - v8::Local item = v.As(); + Napi::Object item = v.As(); std::string topic = GetParameter(item, "topic", ""); int partition = GetParameter(item, "partition", -1); @@ -534,9 +534,9 @@ TopicPartitionOffsetSpecv8ArrayToTopicPartitionList( rd_kafka_topic_partition_t* toppar = rd_kafka_topic_partition_list_add(newList, topic.c_str(), partition); - v8::Local offsetValue = - Nan::Get(item, Nan::New("offset").ToLocalChecked()).ToLocalChecked(); - v8::Local offsetObject = offsetValue.As(); + Napi::Value offsetValue = + (item).Get(Napi::String::New(env, "offset")); + Napi::Object offsetObject = offsetValue.As(); int64_t offset = GetParameter(offsetObject, "timestamp", 0); toppar->offset = offset; @@ -548,7 +548,7 @@ TopicPartitionOffsetSpecv8ArrayToTopicPartitionList( * @brief v8::Object to RdKafka::TopicPartition * */ -RdKafka::TopicPartition * FromV8Object(v8::Local topic_partition) { +RdKafka::TopicPartition * FromV8Object(Napi::Object topic_partition) { std::string topic = GetParameter(topic_partition, "topic", ""); int partition = GetParameter(topic_partition, "partition", -1); int64_t offset = GetParameter(topic_partition, "offset", 0); @@ -564,13 +564,13 @@ return NULL; RdKafka::TopicPartition *toppar = RdKafka::TopicPartition::create(topic, partition, offset); - v8::Local metadataKey = Nan::New("metadata").ToLocalChecked(); - if (Nan::Has(topic_partition, metadataKey).FromMaybe(false)) { - v8::Local metadataValue = - Nan::Get(topic_partition, metadataKey).ToLocalChecked(); + Napi::String 
metadataKey = Napi::String::New(env, "metadata"); + if ((topic_partition).Has(metadataKey).FromMaybe(false)) { + Napi::Value metadataValue = + (topic_partition).Get(metadataKey); - if (metadataValue->IsString()) { - Nan::Utf8String metadataValueUtf8Str(metadataValue.As()); + if (metadataValue.IsString()) { + std::string metadataValueUtf8Str = metadataValue.As(.As()); std::string metadataValueStr(*metadataValueUtf8Str); std::vector metadataVector(metadataValueStr.begin(), metadataValueStr.end()); @@ -581,14 +581,14 @@ return NULL; } toppar->set_leader_epoch(-1); - v8::Local leaderEpochKey = - Nan::New("leaderEpoch").ToLocalChecked(); - if (Nan::Has(topic_partition, leaderEpochKey).FromMaybe(false)) { - v8::Local leaderEpochValue = - Nan::Get(topic_partition, leaderEpochKey).ToLocalChecked(); - - if (leaderEpochValue->IsNumber()) { - int32_t leaderEpoch = Nan::To(leaderEpochValue).FromJust(); + Napi::String leaderEpochKey = + Napi::String::New(env, "leaderEpoch"); + if ((topic_partition).Has(leaderEpochKey).FromMaybe(false)) { + Napi::Value leaderEpochValue = + (topic_partition).Get(leaderEpochKey); + + if (leaderEpochValue.IsNumber()) { + int32_t leaderEpoch = leaderEpochValue.As().Int32Value(); toppar->set_leader_epoch(leaderEpoch); } } @@ -604,11 +604,11 @@ namespace Metadata { * @brief RdKafka::Metadata to v8::Object * */ -v8::Local ToV8Object(RdKafka::Metadata* metadata) { - v8::Local obj = Nan::New(); +Napi::Object ToV8Object(RdKafka::Metadata* metadata) { + Napi::Object obj = Napi::Object::New(env); - v8::Local broker_data = Nan::New(); - v8::Local topic_data = Nan::New(); + Napi::Array broker_data = Napi::Array::New(env); + Napi::Array topic_data = Napi::Array::New(env); const BrokerMetadataList* brokers = metadata->brokers(); // NOLINT @@ -620,16 +620,16 @@ v8::Local ToV8Object(RdKafka::Metadata* metadata) { const RdKafka::BrokerMetadata* x = *it; - v8::Local current_broker = Nan::New(); + Napi::Object current_broker = Napi::Object::New(env); - Nan::Set(current_broker, Nan::New("id").ToLocalChecked(), - Nan::New(x->id())); - Nan::Set(current_broker, Nan::New("host").ToLocalChecked(), - Nan::New(x->host().c_str()).ToLocalChecked()); - Nan::Set(current_broker, Nan::New("port").ToLocalChecked(), - Nan::New(x->port())); + (current_broker).Set(Napi::String::New(env, "id"), + Napi::Number::New(env, x->id())); + (current_broker).Set(Napi::String::New(env, "host"), + Napi::String::New(env, x->host().c_str())); + (current_broker).Set(Napi::String::New(env, "port"), + Napi::Number::New(env, x->port())); - Nan::Set(broker_data, broker_i, current_broker); + (broker_data).Set(broker_i, current_broker); } unsigned int topic_i = 0; @@ -642,12 +642,12 @@ v8::Local ToV8Object(RdKafka::Metadata* metadata) { const RdKafka::TopicMetadata* x = *it; - v8::Local current_topic = Nan::New(); + Napi::Object current_topic = Napi::Object::New(env); - Nan::Set(current_topic, Nan::New("name").ToLocalChecked(), - Nan::New(x->topic().c_str()).ToLocalChecked()); + (current_topic).Set(Napi::String::New(env, "name"), + Napi::String::New(env, x->topic().c_str())); - v8::Local current_topic_partitions = Nan::New(); + Napi::Array current_topic_partitions = Napi::Array::New(env); const PartitionMetadataList* current_partition_data = x->partitions(); @@ -659,12 +659,12 @@ v8::Local ToV8Object(RdKafka::Metadata* metadata) { // partition iterate const RdKafka::PartitionMetadata* xx = *itt; - v8::Local current_partition = Nan::New(); + Napi::Object current_partition = Napi::Object::New(env); - Nan::Set(current_partition, 
Nan::New("id").ToLocalChecked(), - Nan::New(xx->id())); - Nan::Set(current_partition, Nan::New("leader").ToLocalChecked(), - Nan::New(xx->leader())); + (current_partition).Set(Napi::String::New(env, "id"), + Napi::Number::New(env, xx->id())); + (current_partition).Set(Napi::String::New(env, "leader"), + Napi::Number::New(env, xx->leader())); const std::vector * replicas = xx->replicas(); const std::vector * isrs = xx->isrs(); @@ -675,40 +675,40 @@ v8::Local ToV8Object(RdKafka::Metadata* metadata) { unsigned int r_i = 0; unsigned int i_i = 0; - v8::Local current_replicas = Nan::New(); + Napi::Array current_replicas = Napi::Array::New(env); for (r_it = replicas->begin(); r_it != replicas->end(); ++r_it, r_i++) { - Nan::Set(current_replicas, r_i, Nan::New(*r_it)); + (current_replicas).Set(r_i, Napi::Int32::New(env, *r_it)); } - v8::Local current_isrs = Nan::New(); + Napi::Array current_isrs = Napi::Array::New(env); for (i_it = isrs->begin(); i_it != isrs->end(); ++i_it, i_i++) { - Nan::Set(current_isrs, i_i, Nan::New(*i_it)); + (current_isrs).Set(i_i, Napi::Int32::New(env, *i_it)); } - Nan::Set(current_partition, Nan::New("replicas").ToLocalChecked(), + (current_partition).Set(Napi::String::New(env, "replicas"), current_replicas); - Nan::Set(current_partition, Nan::New("isrs").ToLocalChecked(), + (current_partition).Set(Napi::String::New(env, "isrs"), current_isrs); - Nan::Set(current_topic_partitions, partition_i, current_partition); + (current_topic_partitions).Set(partition_i, current_partition); } // iterate over partitions - Nan::Set(current_topic, Nan::New("partitions").ToLocalChecked(), + (current_topic).Set(Napi::String::New(env, "partitions"), current_topic_partitions); - Nan::Set(topic_data, topic_i, current_topic); + (topic_data).Set(topic_i, current_topic); } // End iterating over topics - Nan::Set(obj, Nan::New("orig_broker_id").ToLocalChecked(), - Nan::New(metadata->orig_broker_id())); + (obj).Set(Napi::String::New(env, "orig_broker_id"), + Napi::Number::New(env, metadata->orig_broker_id())); - Nan::Set(obj, Nan::New("orig_broker_name").ToLocalChecked(), - Nan::New(metadata->orig_broker_name()).ToLocalChecked()); + (obj).Set(Napi::String::New(env, "orig_broker_name"), + Napi::String::New(env, metadata->orig_broker_name())); - Nan::Set(obj, Nan::New("topics").ToLocalChecked(), topic_data); - Nan::Set(obj, Nan::New("brokers").ToLocalChecked(), broker_data); + (obj).Set(Napi::String::New(env, "topics"), topic_data); + (obj).Set(Napi::String::New(env, "brokers"), broker_data); return obj; } @@ -718,75 +718,74 @@ v8::Local ToV8Object(RdKafka::Metadata* metadata) { namespace Message { // Overload for all use cases except delivery reports -v8::Local ToV8Object(RdKafka::Message *message) { +Napi::Object ToV8Object(RdKafka::Message *message) { return ToV8Object(message, true, true); } -v8::Local ToV8Object(RdKafka::Message *message, +Napi::Object ToV8Object(RdKafka::Message *message, bool include_payload, bool include_headers) { if (message->err() == RdKafka::ERR_NO_ERROR) { - v8::Local pack = Nan::New(); + Napi::Object pack = Napi::Object::New(env); const void* message_payload = message->payload(); if (!include_payload) { - Nan::Set(pack, Nan::New("value").ToLocalChecked(), - Nan::Undefined()); + (pack).Set(Napi::String::New(env, "value"), + env.Undefined()); } else if (message_payload) { - Nan::Set(pack, Nan::New("value").ToLocalChecked(), - Nan::Encode(message_payload, message->len(), Nan::Encoding::BUFFER)); + (pack).Set(Napi::String::New(env, "value"), + 
Napi::Buffer<char>::Copy(
+          env, static_cast<const char*>(message_payload), message->len()));
     } else {
-      Nan::Set(pack, Nan::New<v8::String>("value").ToLocalChecked(),
-        Nan::Null());
+      pack.Set(Napi::String::New(env, "value"),
+        env.Null());
     }
 
     RdKafka::Headers* headers;
     if (((headers = message->headers()) != 0) && include_headers) {
-      v8::Local<v8::Array> v8headers = Nan::New<v8::Array>();
+      Napi::Array v8headers = Napi::Array::New(env);
       int index = 0;
       std::vector<RdKafka::Headers::Header> all = headers->get_all();
       for (std::vector<RdKafka::Headers::Header>::iterator it = all.begin();
           it != all.end(); it++) {
-        v8::Local<v8::Object> v8header = Nan::New<v8::Object>();
-        Nan::Set(v8header, Nan::New<v8::String>(it->key()).ToLocalChecked(),
-          Nan::Encode(it->value_string(),
-            it->value_size(), Nan::Encoding::BUFFER));
-        Nan::Set(v8headers, index, v8header);
+        Napi::Object v8header = Napi::Object::New(env);
+        v8header.Set(Napi::String::New(env, it->key()),
+          Napi::Buffer<char>::Copy(env, it->value_string(),
+            it->value_size()));
+        v8headers.Set(index, v8header);
         index++;
       }
-      Nan::Set(pack,
-        Nan::New<v8::String>("headers").ToLocalChecked(), v8headers);
+      pack.Set(Napi::String::New(env, "headers"), v8headers);
     }
 
-    Nan::Set(pack, Nan::New<v8::String>("size").ToLocalChecked(),
-      Nan::New<v8::Number>(message->len()));
+    pack.Set(Napi::String::New(env, "size"),
+      Napi::Number::New(env, message->len()));
 
     const void* key_payload = message->key_pointer();
 
     if (key_payload) {
       // We want this to also be a buffer to avoid corruption
       // https://github.com/confluentinc/confluent-kafka-javascript/issues/208
-      Nan::Set(pack, Nan::New<v8::String>("key").ToLocalChecked(),
-        Nan::Encode(key_payload, message->key_len(), Nan::Encoding::BUFFER));
+      pack.Set(Napi::String::New(env, "key"),
+        Napi::Buffer<char>::Copy(
+          env, static_cast<const char*>(key_payload), message->key_len()));
     } else {
-      Nan::Set(pack, Nan::New<v8::String>("key").ToLocalChecked(),
-        Nan::Null());
+      pack.Set(Napi::String::New(env, "key"),
+        env.Null());
     }
 
-    Nan::Set(pack, Nan::New<v8::String>("topic").ToLocalChecked(),
-      Nan::New<v8::String>(message->topic_name()).ToLocalChecked());
-    Nan::Set(pack, Nan::New<v8::String>("offset").ToLocalChecked(),
-      Nan::New<v8::Number>(message->offset()));
-    Nan::Set(pack, Nan::New<v8::String>("partition").ToLocalChecked(),
-      Nan::New<v8::Number>(message->partition()));
-    Nan::Set(pack, Nan::New<v8::String>("timestamp").ToLocalChecked(),
-      Nan::New<v8::Number>(message->timestamp().timestamp));
+    pack.Set(Napi::String::New(env, "topic"),
+      Napi::String::New(env, message->topic_name()));
+    pack.Set(Napi::String::New(env, "offset"),
+      Napi::Number::New(env, message->offset()));
+    pack.Set(Napi::String::New(env, "partition"),
+      Napi::Number::New(env, message->partition()));
+    pack.Set(Napi::String::New(env, "timestamp"),
+      Napi::Number::New(env, message->timestamp().timestamp));
 
     int32_t leader_epoch = message->leader_epoch();
     if (leader_epoch >= 0) {
-      Nan::Set(pack, Nan::New<v8::String>("leaderEpoch").ToLocalChecked(),
-        Nan::New<v8::Number>(leader_epoch));
+      pack.Set(Napi::String::New(env, "leaderEpoch"),
+        Napi::Number::New(env, leader_epoch));
     }
 
     return pack;
@@ -809,7 +808,7 @@ namespace Admin {
  *
  */
 rd_kafka_NewTopic_t* FromV8TopicObject(
-  v8::Local<v8::Object> object, std::string &errstr) {  // NOLINT
+  Napi::Object object, std::string &errstr) {  // NOLINT
   std::string topic_name = GetParameter<std::string>(object, "topic", "");
   int num_partitions = GetParameter<int>(object, "num_partitions", 0);
   int replication_factor = GetParameter<int>(object, "replication_factor", 0);
@@ -830,29 +829,29 @@ rd_kafka_NewTopic_t* FromV8TopicObject(
 
   rd_kafka_resp_err_t err;
 
-  if (Nan::Has(object, Nan::New("config").ToLocalChecked()).FromMaybe(false)) {
+  if (object.Has("config")) {
     // Get the config v8::Object that we can get
parameters on - v8::Local config = - Nan::Get(object, Nan::New("config").ToLocalChecked()) - .ToLocalChecked().As(); + Napi::Object config = + (object).Get(Napi::String::New(env, "config")) + .As(); // Get all of the keys of the object - v8::MaybeLocal config_keys = Nan::GetOwnPropertyNames(config); + v8::MaybeLocal config_keys = Napi::GetOwnPropertyNames(config); if (!config_keys.IsEmpty()) { - v8::Local field_array = config_keys.ToLocalChecked(); + Napi::Array field_array = config_keys; for (size_t i = 0; i < field_array->Length(); i++) { - v8::Local config_key = Nan::Get(field_array, i) - .ToLocalChecked().As(); - v8::Local config_value = Nan::Get(config, config_key) - .ToLocalChecked(); + Napi::String config_key = (field_array).Get(i) + .As(); + Napi::Value config_value = (config).Get(config_key) + ; // If the config value is a string... - if (config_value->IsString()) { - Nan::Utf8String pKeyVal(config_key); + if (config_value.IsString()) { + std::string pKeyVal = config_key.As(); std::string pKeyString(*pKeyVal); - Nan::Utf8String pValueVal(config_value.As()); + std::string pValueVal = config_value.As(.As()); std::string pValString(*pValueVal); err = rd_kafka_NewTopic_set_config( @@ -875,7 +874,7 @@ rd_kafka_NewTopic_t* FromV8TopicObject( return new_topic; } -rd_kafka_NewTopic_t** FromV8TopicObjectArray(v8::Local) { +rd_kafka_NewTopic_t** FromV8TopicObjectArray(Napi::Array) { return NULL; } @@ -884,20 +883,20 @@ rd_kafka_NewTopic_t** FromV8TopicObjectArray(v8::Local) { * rd_kafka_consumer_group_state_t. */ std::vector FromV8GroupStateArray( - v8::Local array) { - v8::Local parameter = array.As(); + Napi::Array array) { + Napi::Array parameter = array.As(); std::vector returnVec; if (parameter->Length() >= 1) { for (unsigned int i = 0; i < parameter->Length(); i++) { - v8::Local v; - if (!Nan::Get(parameter, i).ToLocal(&v)) { + Napi::Value v; + if (!(parameter).Get(i).ToLocal(&v)) { continue; } - Nan::Maybe maybeT = Nan::To(v); + Napi::Maybe maybeT = v.As().Int64Value(); if (maybeT.IsNothing()) { continue; } - int64_t state_number = maybeT.FromJust(); + int64_t state_number = maybeT; if (state_number >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT) { continue; } @@ -911,7 +910,7 @@ std::vector FromV8GroupStateArray( /** * @brief Converts a rd_kafka_ListConsumerGroups_result_t* into a v8 object. 
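 *
 * A minimal sketch of the error-list wrapping used below (assumptions: `env`
 * is the ambient Napi::Env, which the machine migration leaves undeclared,
 * and RdKafkaError yields a JS object, as elsewhere in this file):
 * @code
 *   Napi::Array errors = Napi::Array::New(env, error_cnt);
 *   for (size_t i = 0; i < error_cnt; i++) {
 *     RdKafka::ErrorCode code =
 *         static_cast<RdKafka::ErrorCode>(rd_kafka_error_code(error_list[i]));
 *     errors.Set(i, RdKafkaError(code, rd_kafka_error_string(error_list[i])));
 *   }
 * @endcode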
*/ -v8::Local FromListConsumerGroupsResult( +Napi::Object FromListConsumerGroupsResult( const rd_kafka_ListConsumerGroups_result_t* result) { /* Return object type: { @@ -924,50 +923,50 @@ v8::Local FromListConsumerGroupsResult( errors: LibrdKafkaError[] } */ - v8::Local returnObject = Nan::New(); + Napi::Object returnObject = Napi::Object::New(env); size_t error_cnt; const rd_kafka_error_t** error_list = rd_kafka_ListConsumerGroups_result_errors(result, &error_cnt); - Nan::Set(returnObject, Nan::New("errors").ToLocalChecked(), + (returnObject).Set(Napi::String::New(env, "errors"), Conversion::Util::ToV8Array(error_list, error_cnt)); - v8::Local groups = Nan::New(); + Napi::Array groups = Napi::Array::New(env); size_t groups_cnt; const rd_kafka_ConsumerGroupListing_t** groups_list = rd_kafka_ListConsumerGroups_result_valid(result, &groups_cnt); for (size_t i = 0; i < groups_cnt; i++) { const rd_kafka_ConsumerGroupListing_t* group = groups_list[i]; - v8::Local groupObject = Nan::New(); + Napi::Object groupObject = Napi::Object::New(env); - Nan::Set(groupObject, Nan::New("groupId").ToLocalChecked(), - Nan::New(rd_kafka_ConsumerGroupListing_group_id(group)) - .ToLocalChecked()); + (groupObject).Set(Napi::String::New(env, "groupId"), + Napi::String::New(env, rd_kafka_ConsumerGroupListing_group_id(group)) + ); bool is_simple = rd_kafka_ConsumerGroupListing_is_simple_consumer_group(group); - Nan::Set(groupObject, Nan::New("isSimpleConsumerGroup").ToLocalChecked(), - Nan::New(is_simple)); + (groupObject).Set(Napi::String::New(env, "isSimpleConsumerGroup"), + Napi::Boolean::New(env, is_simple)); std::string protocol_type = is_simple ? "simple" : "consumer"; - Nan::Set(groupObject, Nan::New("protocolType").ToLocalChecked(), - Nan::New(protocol_type).ToLocalChecked()); + (groupObject).Set(Napi::String::New(env, "protocolType"), + Napi::String::New(env, protocol_type)); - Nan::Set(groupObject, Nan::New("state").ToLocalChecked(), - Nan::New(rd_kafka_ConsumerGroupListing_state(group))); + (groupObject).Set(Napi::String::New(env, "state"), + Napi::Number::New(env, rd_kafka_ConsumerGroupListing_state(group))); - Nan::Set(groups, i, groupObject); + (groups).Set(i, groupObject); } - Nan::Set(returnObject, Nan::New("groups").ToLocalChecked(), groups); + (returnObject).Set(Napi::String::New(env, "groups"), groups); return returnObject; } /** * @brief Converts a rd_kafka_MemberDescription_t* into a v8 object. 
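 *
 * Optional fields follow the convention sketched here (assuming `env` is in
 * scope): librdkafka returns NULL for absent strings, so the property is set
 * only when present:
 * @code
 *   const char* gid = rd_kafka_MemberDescription_group_instance_id(member);
 *   if (gid) {
 *     returnObject.Set("groupInstanceId", Napi::String::New(env, gid));
 *   }
 * @endcode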
*/ -v8::Local FromMemberDescription( +Napi::Object FromMemberDescription( const rd_kafka_MemberDescription_t* member) { /* Return object type: { @@ -982,37 +981,37 @@ v8::Local FromMemberDescription( }, } */ - v8::Local returnObject = Nan::New(); + Napi::Object returnObject = Napi::Object::New(env); // clientHost - Nan::Set(returnObject, Nan::New("clientHost").ToLocalChecked(), - Nan::New(rd_kafka_MemberDescription_host(member)) - .ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "clientHost"), + Napi::String::New(env, rd_kafka_MemberDescription_host(member)) + ); // clientId - Nan::Set(returnObject, Nan::New("clientId").ToLocalChecked(), - Nan::New(rd_kafka_MemberDescription_client_id(member)) - .ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "clientId"), + Napi::String::New(env, rd_kafka_MemberDescription_client_id(member)) + ); // memberId - Nan::Set(returnObject, Nan::New("memberId").ToLocalChecked(), - Nan::New(rd_kafka_MemberDescription_consumer_id(member)) - .ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "memberId"), + Napi::String::New(env, rd_kafka_MemberDescription_consumer_id(member)) + ); // memberAssignment - not passed to user, always null - Nan::Set(returnObject, Nan::New("memberAssignment").ToLocalChecked(), - Nan::Null()); + (returnObject).Set(Napi::String::New(env, "memberAssignment"), + env.Null()); // memberMetadata - not passed to user, always null - Nan::Set(returnObject, Nan::New("memberMetadata").ToLocalChecked(), - Nan::Null()); + (returnObject).Set(Napi::String::New(env, "memberMetadata"), + env.Null()); // groupInstanceId const char* group_instance_id = rd_kafka_MemberDescription_group_instance_id(member); if (group_instance_id) { - Nan::Set(returnObject, Nan::New("groupInstanceId").ToLocalChecked(), - Nan::New(group_instance_id).ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "groupInstanceId"), + Napi::String::New(env, group_instance_id)); } // assignment @@ -1020,12 +1019,12 @@ v8::Local FromMemberDescription( rd_kafka_MemberDescription_assignment(member); const rd_kafka_topic_partition_list_t* partitions = rd_kafka_MemberAssignment_partitions(assignment); - v8::Local topicPartitions = + Napi::Array topicPartitions = Conversion::TopicPartition::ToTopicPartitionV8Array(partitions, false); - v8::Local assignmentObject = Nan::New(); - Nan::Set(assignmentObject, Nan::New("topicPartitions").ToLocalChecked(), + Napi::Object assignmentObject = Napi::Object::New(env); + (assignmentObject).Set(Napi::String::New(env, "topicPartitions"), topicPartitions); - Nan::Set(returnObject, Nan::New("assignment").ToLocalChecked(), + (returnObject).Set(Napi::String::New(env, "assignment"), assignmentObject); return returnObject; @@ -1034,7 +1033,7 @@ v8::Local FromMemberDescription( /** * @brief Converts a rd_kafka_ConsumerGroupDescription_t* into a v8 object. 
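 *
 * The RdKafkaError(code, msg) helper used below returns a JS object; a
 * comparable shape could be built directly with node-addon-api (sketch only,
 * assuming `env` is in scope):
 * @code
 *   Napi::Error e = Napi::Error::New(env, msg);
 *   e.Value().Set("code", Napi::Number::New(env, code));
 *   returnObject.Set("error", e.Value());
 * @endcode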
*/ -v8::Local FromConsumerGroupDescription( +Napi::Object FromConsumerGroupDescription( const rd_kafka_ConsumerGroupDescription_t* desc) { /* Return object type: { @@ -1050,13 +1049,13 @@ v8::Local FromConsumerGroupDescription( authorizedOperations: AclOperationType[] - internally numbers } */ - v8::Local returnObject = Nan::New(); + Napi::Object returnObject = Napi::Object::New(env); // groupId - Nan::Set( - returnObject, Nan::New("groupId").ToLocalChecked(), - Nan::New(rd_kafka_ConsumerGroupDescription_group_id(desc)) - .ToLocalChecked()); + ( + returnObject).Set(Napi::String::New(env, "groupId"), + Napi::String::New(env, rd_kafka_ConsumerGroupDescription_group_id(desc)) + ); // error const rd_kafka_error_t* error = rd_kafka_ConsumerGroupDescription_error(desc); @@ -1064,54 +1063,54 @@ v8::Local FromConsumerGroupDescription( RdKafka::ErrorCode code = static_cast(rd_kafka_error_code(error)); std::string msg = std::string(rd_kafka_error_string(error)); - Nan::Set(returnObject, Nan::New("error").ToLocalChecked(), + (returnObject).Set(Napi::String::New(env, "error"), RdKafkaError(code, msg)); } // members - v8::Local members = Nan::New(); + Napi::Array members = Napi::Array::New(env); size_t member_cnt = rd_kafka_ConsumerGroupDescription_member_count(desc); for (size_t i = 0; i < member_cnt; i++) { const rd_kafka_MemberDescription_t* member = rd_kafka_ConsumerGroupDescription_member(desc, i); - Nan::Set(members, i, FromMemberDescription(member)); + (members).Set(i, FromMemberDescription(member)); } - Nan::Set(returnObject, Nan::New("members").ToLocalChecked(), members); + (returnObject).Set(Napi::String::New(env, "members"), members); // isSimpleConsumerGroup bool is_simple = rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(desc); - Nan::Set(returnObject, Nan::New("isSimpleConsumerGroup").ToLocalChecked(), - Nan::New(is_simple)); + (returnObject).Set(Napi::String::New(env, "isSimpleConsumerGroup"), + Napi::Boolean::New(env, is_simple)); // protocolType std::string protocolType = is_simple ? 
"simple" : "consumer"; - Nan::Set(returnObject, Nan::New("protocolType").ToLocalChecked(), - Nan::New(protocolType).ToLocalChecked()); + (returnObject).Set(Napi::String::New(env, "protocolType"), + Napi::String::New(env, protocolType)); // protocol - Nan::Set(returnObject, Nan::New("protocol").ToLocalChecked(), - Nan::New( + (returnObject).Set(Napi::String::New(env, "protocol"), + Napi::String::New(env, rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) - .ToLocalChecked()); + ); // partitionAssignor - Nan::Set(returnObject, Nan::New("partitionAssignor").ToLocalChecked(), - Nan::New( + (returnObject).Set(Napi::String::New(env, "partitionAssignor"), + Napi::String::New(env, rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) - .ToLocalChecked()); + ); // state - Nan::Set(returnObject, Nan::New("state").ToLocalChecked(), - Nan::New(rd_kafka_ConsumerGroupDescription_state(desc))); + (returnObject).Set(Napi::String::New(env, "state"), + Napi::Number::New(env, rd_kafka_ConsumerGroupDescription_state(desc))); // coordinator const rd_kafka_Node_t* coordinator = rd_kafka_ConsumerGroupDescription_coordinator(desc); if (coordinator) { - v8::Local coordinatorObject = + Napi::Object coordinatorObject = Conversion::Util::ToV8Object(coordinator); - Nan::Set(returnObject, Nan::New("coordinator").ToLocalChecked(), + (returnObject).Set(Napi::String::New(env, "coordinator"), coordinatorObject); } @@ -1121,7 +1120,7 @@ v8::Local FromConsumerGroupDescription( rd_kafka_ConsumerGroupDescription_authorized_operations( desc, &authorized_operations_cnt); if (authorized_operations) { - Nan::Set(returnObject, Nan::New("authorizedOperations").ToLocalChecked(), + (returnObject).Set(Napi::String::New(env, "authorizedOperations"), Conversion::Util::ToV8Array(authorized_operations, authorized_operations_cnt)); } @@ -1132,30 +1131,30 @@ v8::Local FromConsumerGroupDescription( /** * @brief Converts a rd_kafka_DescribeConsumerGroups_result_t* into a v8 object. */ -v8::Local FromDescribeConsumerGroupsResult( +Napi::Object FromDescribeConsumerGroupsResult( const rd_kafka_DescribeConsumerGroups_result_t* result) { /* Return object type: { groups: GroupDescription[] } */ - v8::Local returnObject = Nan::New(); - v8::Local groups = Nan::New(); + Napi::Object returnObject = Napi::Object::New(env); + Napi::Array groups = Napi::Array::New(env); size_t groups_cnt; const rd_kafka_ConsumerGroupDescription_t** groups_list = rd_kafka_DescribeConsumerGroups_result_groups(result, &groups_cnt); for (size_t i = 0; i < groups_cnt; i++) { const rd_kafka_ConsumerGroupDescription_t* group = groups_list[i]; - Nan::Set(groups, i, FromConsumerGroupDescription(group)); + (groups).Set(i, FromConsumerGroupDescription(group)); } - Nan::Set(returnObject, Nan::New("groups").ToLocalChecked(), groups); + (returnObject).Set(Napi::String::New(env, "groups"), groups); return returnObject; } /** * @brief Converts a rd_kafka_DeleteGroups_result_t* into a v8 array. 
*/ -v8::Local FromDeleteGroupsResult( +Napi::Array FromDeleteGroupsResult( const rd_kafka_DeleteGroups_result_t* result) { /* Return object type: [{ @@ -1164,34 +1163,34 @@ v8::Local FromDeleteGroupsResult( error?: LibrdKafkaError }] */ - v8::Local returnArray = Nan::New(); + Napi::Array returnArray = Napi::Array::New(env); size_t result_cnt; const rd_kafka_group_result_t** results = rd_kafka_DeleteGroups_result_groups(result, &result_cnt); for (size_t i = 0; i < result_cnt; i++) { const rd_kafka_group_result_t* group_result = results[i]; - v8::Local group_object = Nan::New(); + Napi::Object group_object = Napi::Object::New(env); - Nan::Set(group_object, Nan::New("groupId").ToLocalChecked(), - Nan::New(rd_kafka_group_result_name(group_result)) - .ToLocalChecked()); + (group_object).Set(Napi::String::New(env, "groupId"), + Napi::String::New(env, rd_kafka_group_result_name(group_result)) + ); const rd_kafka_error_t* error = rd_kafka_group_result_error(group_result); if (!error) { - Nan::Set(group_object, Nan::New("errorCode").ToLocalChecked(), - Nan::New(RD_KAFKA_RESP_ERR_NO_ERROR)); + (group_object).Set(Napi::String::New(env, "errorCode"), + Napi::Number::New(env, RD_KAFKA_RESP_ERR_NO_ERROR)); } else { RdKafka::ErrorCode code = static_cast(rd_kafka_error_code(error)); const char* msg = rd_kafka_error_string(error); - Nan::Set(group_object, Nan::New("errorCode").ToLocalChecked(), - Nan::New(code)); - Nan::Set(group_object, Nan::New("error").ToLocalChecked(), + (group_object).Set(Napi::String::New(env, "errorCode"), + Napi::Number::New(env, code)); + (group_object).Set(Napi::String::New(env, "error"), RdKafkaError(code, msg)); } - Nan::Set(returnArray, i, group_object); + (returnArray).Set(i, group_object); } return returnArray; @@ -1201,7 +1200,7 @@ v8::Local FromDeleteGroupsResult( * @brief Converts a rd_kafka_ListConsumerGroupOffsets_result_t* * into a v8 Array. 
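 *
 * rd_kafka_topic_partition_list_t is a counted C array, so the traversal
 * below can pre-size the JS array (sketch, assuming `env` is in scope):
 * @code
 *   Napi::Array parts = Napi::Array::New(env, partitionList->cnt);
 *   for (int j = 0; j < partitionList->cnt; j++) {
 *     const rd_kafka_topic_partition_t* p = &partitionList->elems[j];
 *     Napi::Object o = Napi::Object::New(env);
 *     o.Set("topic", Napi::String::New(env, p->topic));
 *     o.Set("partition", Napi::Number::New(env, p->partition));
 *     o.Set("offset", Napi::Number::New(env, p->offset));
 *     parts.Set(j, o);
 *   }
 * @endcode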
*/ -v8::Local FromListConsumerGroupOffsetsResult( +Napi::Array FromListConsumerGroupOffsetsResult( const rd_kafka_ListConsumerGroupOffsets_result_t* result) { /* Return Object type: GroupResults[] = [{ @@ -1221,7 +1220,7 @@ v8::Local FromListConsumerGroupOffsetsResult( } */ - v8::Local returnArray = Nan::New(); + Napi::Array returnArray = Napi::Array::New(env); size_t result_cnt; const rd_kafka_group_result_t** res = rd_kafka_ListConsumerGroupOffsets_result_groups(result, &result_cnt); @@ -1230,12 +1229,12 @@ v8::Local FromListConsumerGroupOffsetsResult( const rd_kafka_group_result_t* group_result = res[i]; // Create group result object - v8::Local group_object = Nan::New(); + Napi::Object group_object = Napi::Object::New(env); // Set groupId std::string groupId = rd_kafka_group_result_name(group_result); - Nan::Set(group_object, Nan::New("groupId").ToLocalChecked(), - Nan::New(groupId.c_str()).ToLocalChecked()); + (group_object).Set(Napi::String::New(env, "groupId"), + Napi::String::New(env, groupId.c_str())); // Set group-level error (if any) const rd_kafka_error_t* group_error = @@ -1244,7 +1243,7 @@ v8::Local FromListConsumerGroupOffsetsResult( RdKafka::ErrorCode code = static_cast(rd_kafka_error_code(group_error)); const char* msg = rd_kafka_error_string(group_error); - Nan::Set(group_object, Nan::New("error").ToLocalChecked(), + (group_object).Set(Napi::String::New(env, "error"), RdKafkaError(code, msg)); } @@ -1253,57 +1252,57 @@ v8::Local FromListConsumerGroupOffsetsResult( rd_kafka_group_result_partitions(group_result); // Prepare array for TopicPartitionOffset[] - v8::Local partitionsArray = Nan::New(); + Napi::Array partitionsArray = Napi::Array::New(env); int partitionIndex = 0; for (int j = 0; j < partitionList->cnt; j++) { const rd_kafka_topic_partition_t* partition = &partitionList->elems[j]; // Create the TopicPartitionOffset object - v8::Local partition_object = Nan::New(); + Napi::Object partition_object = Napi::Object::New(env); // Set topic, partition, and offset - Nan::Set(partition_object, Nan::New("topic").ToLocalChecked(), - Nan::New(partition->topic).ToLocalChecked()); - Nan::Set(partition_object, Nan::New("partition").ToLocalChecked(), - Nan::New(partition->partition)); - Nan::Set(partition_object, Nan::New("offset").ToLocalChecked(), - Nan::New(partition->offset)); + (partition_object).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, partition->topic)); + (partition_object).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, partition->partition)); + (partition_object).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, partition->offset)); // Set metadata (if available) if (partition->metadata != nullptr) { - Nan::Set( - partition_object, Nan::New("metadata").ToLocalChecked(), - Nan::New(static_cast(partition->metadata)) - .ToLocalChecked()); + ( + partition_object).Set(Napi::String::New(env, "metadata"), + Napi::String::New(env, static_cast(partition->metadata)) + ); } else { - Nan::Set(partition_object, Nan::New("metadata").ToLocalChecked(), - Nan::Null()); + (partition_object).Set(Napi::String::New(env, "metadata"), + env.Null()); } // Set leaderEpoch (if available) int32_t leader_epoch = rd_kafka_topic_partition_get_leader_epoch(partition); if (leader_epoch >= 0) { - Nan::Set(partition_object, Nan::New("leaderEpoch").ToLocalChecked(), - Nan::New(leader_epoch)); + (partition_object).Set(Napi::String::New(env, "leaderEpoch"), + Napi::Number::New(env, leader_epoch)); } // Set partition-level error (if any) if 
(partition->err != RD_KAFKA_RESP_ERR_NO_ERROR) { RdKafka::ErrorCode code = static_cast(partition->err); - Nan::Set(group_object, Nan::New("error").ToLocalChecked(), + (group_object).Set(Napi::String::New(env, "error"), RdKafkaError(code, rd_kafka_err2str(partition->err))); } - Nan::Set(partitionsArray, partitionIndex++, partition_object); + (partitionsArray).Set(partitionIndex++, partition_object); } - Nan::Set(group_object, Nan::New("partitions").ToLocalChecked(), + (group_object).Set(Napi::String::New(env, "partitions"), partitionsArray); - Nan::Set(returnArray, i, group_object); + (returnArray).Set(i, group_object); } return returnArray; @@ -1312,7 +1311,7 @@ v8::Local FromListConsumerGroupOffsetsResult( /** * @brief Converts a rd_kafka_DeleteRecords_result_t* into a v8 Array. */ -v8::Local FromDeleteRecordsResult( +Napi::Array FromDeleteRecordsResult( const rd_kafka_DeleteRecords_result_t* result) { /* Return object type: [{ @@ -1325,30 +1324,30 @@ v8::Local FromDeleteRecordsResult( const rd_kafka_topic_partition_list_t* partitionList = rd_kafka_DeleteRecords_result_offsets(result); - v8::Local partitionsArray = Nan::New(); + Napi::Array partitionsArray = Napi::Array::New(env); int partitionIndex = 0; for (int j = 0; j < partitionList->cnt; j++) { const rd_kafka_topic_partition_t* partition = &partitionList->elems[j]; // Create the TopicPartitionOffset object - v8::Local partition_object = Nan::New(); + Napi::Object partition_object = Napi::Object::New(env); // Set topic, partition, and offset and error(if required) - Nan::Set(partition_object, Nan::New("topic").ToLocalChecked(), - Nan::New(partition->topic).ToLocalChecked()); - Nan::Set(partition_object, Nan::New("partition").ToLocalChecked(), - Nan::New(partition->partition)); - Nan::Set(partition_object, Nan::New("lowWatermark").ToLocalChecked(), - Nan::New(partition->offset)); + (partition_object).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, partition->topic)); + (partition_object).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, partition->partition)); + (partition_object).Set(Napi::String::New(env, "lowWatermark"), + Napi::Number::New(env, partition->offset)); if (partition->err != RD_KAFKA_RESP_ERR_NO_ERROR) { RdKafka::ErrorCode code = static_cast(partition->err); - Nan::Set(partition_object, Nan::New("error").ToLocalChecked(), + (partition_object).Set(Napi::String::New(env, "error"), RdKafkaError(code, rd_kafka_err2str(partition->err))); } - Nan::Set(partitionsArray, partitionIndex++, partition_object); + (partitionsArray).Set(partitionIndex++, partition_object); } return partitionsArray; @@ -1357,7 +1356,7 @@ v8::Local FromDeleteRecordsResult( /** * @brief Converts a rd_kafka_DescribeTopics_result_t* into a v8 Array. 
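 *
 * ISR and replica lists arrive as counted arrays of rd_kafka_Node_t*, and
 * each node is wrapped via Conversion::Util::ToV8Object (sketch, assuming
 * `env` is in scope):
 * @code
 *   Napi::Array isrArray = Napi::Array::New(env, isr_cnt);
 *   for (size_t k = 0; k < isr_cnt; k++) {
 *     isrArray.Set(k, Conversion::Util::ToV8Object(isr[k]));
 *   }
 * @endcode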
*/ -v8::Local FromDescribeTopicsResult( +Napi::Array FromDescribeTopicsResult( const rd_kafka_DescribeTopics_result_t* result) { /* Return object type: [{ @@ -1385,7 +1384,7 @@ v8::Local FromDescribeTopicsResult( } */ - v8::Local returnArray = Nan::New(); + Napi::Array returnArray = Napi::Array::New(env); size_t result_cnt; const rd_kafka_TopicDescription_t** results = rd_kafka_DescribeTopics_result_topics(result, &result_cnt); @@ -1393,26 +1392,26 @@ v8::Local FromDescribeTopicsResult( int topicIndex = 0; for (size_t i = 0; i < result_cnt; i++) { - v8::Local topic_object = Nan::New(); + Napi::Object topic_object = Napi::Object::New(env); const char* topic_name = rd_kafka_TopicDescription_name(results[i]); - Nan::Set(topic_object, Nan::New("name").ToLocalChecked(), - Nan::New(topic_name).ToLocalChecked()); + (topic_object).Set(Napi::String::New(env, "name"), + Napi::String::New(env, topic_name)); const rd_kafka_Uuid_t* topic_id = rd_kafka_TopicDescription_topic_id(results[i]); - Nan::Set(topic_object, Nan::New("topicId").ToLocalChecked(), + (topic_object).Set(Napi::String::New(env, "topicId"), Conversion::Util::UuidToV8Object(topic_id)); int is_internal = rd_kafka_TopicDescription_is_internal(results[i]); - Nan::Set(topic_object, Nan::New("isInternal").ToLocalChecked(), - Nan::New(is_internal)); + (topic_object).Set(Napi::String::New(env, "isInternal"), + Napi::Boolean::New(env, is_internal)); const rd_kafka_error_t* error = rd_kafka_TopicDescription_error(results[i]); if (error) { RdKafka::ErrorCode code = static_cast(rd_kafka_error_code(error)); - Nan::Set(topic_object, Nan::New("error").ToLocalChecked(), + (topic_object).Set(Napi::String::New(env, "error"), RdKafkaError(code, rd_kafka_error_string(error))); } @@ -1421,7 +1420,7 @@ v8::Local FromDescribeTopicsResult( rd_kafka_TopicDescription_authorized_operations( results[i], &authorized_operations_cnt); if (authorized_operations) { - Nan::Set(topic_object, Nan::New("authorizedOperations").ToLocalChecked(), + (topic_object).Set(Napi::String::New(env, "authorizedOperations"), Conversion::Util::ToV8Array(authorized_operations, authorized_operations_cnt)); } @@ -1429,44 +1428,44 @@ v8::Local FromDescribeTopicsResult( size_t partition_cnt; const rd_kafka_TopicPartitionInfo_t** partitions = rd_kafka_TopicDescription_partitions(results[i], &partition_cnt); - v8::Local partitionsArray = Nan::New(); + Napi::Array partitionsArray = Napi::Array::New(env); for (size_t j = 0; j < partition_cnt; j++) { - v8::Local partition_object = Nan::New(); + Napi::Object partition_object = Napi::Object::New(env); const rd_kafka_TopicPartitionInfo_t* partition = partitions[j]; - Nan::Set(partition_object, Nan::New("partition").ToLocalChecked(), - Nan::New( + (partition_object).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, rd_kafka_TopicPartitionInfo_partition(partition))); const rd_kafka_Node_t* leader = rd_kafka_TopicPartitionInfo_leader(partition); - Nan::Set(partition_object, Nan::New("leader").ToLocalChecked(), + (partition_object).Set(Napi::String::New(env, "leader"), Conversion::Util::ToV8Object(leader)); size_t isr_cnt; const rd_kafka_Node_t** isr = rd_kafka_TopicPartitionInfo_isr(partition, &isr_cnt); - v8::Local isrArray = Nan::New(); + Napi::Array isrArray = Napi::Array::New(env); for (size_t k = 0; k < isr_cnt; k++) { - Nan::Set(isrArray, k, Conversion::Util::ToV8Object(isr[k])); + (isrArray).Set(k, Conversion::Util::ToV8Object(isr[k])); } - Nan::Set(partition_object, Nan::New("isr").ToLocalChecked(), isrArray); + 
(partition_object).Set(Napi::String::New(env, "isr"), isrArray); size_t replicas_cnt; const rd_kafka_Node_t** replicas = rd_kafka_TopicPartitionInfo_replicas(partition, &replicas_cnt); - v8::Local replicasArray = Nan::New(); + Napi::Array replicasArray = Napi::Array::New(env); for (size_t k = 0; k < replicas_cnt; k++) { - Nan::Set(replicasArray, k, Conversion::Util::ToV8Object(replicas[k])); + (replicasArray).Set(k, Conversion::Util::ToV8Object(replicas[k])); } - Nan::Set(partition_object, Nan::New("replicas").ToLocalChecked(), + (partition_object).Set(Napi::String::New(env, "replicas"), replicasArray); - Nan::Set(partitionsArray, j, partition_object); + (partitionsArray).Set(j, partition_object); } - Nan::Set(topic_object, Nan::New("partitions").ToLocalChecked(), + (topic_object).Set(Napi::String::New(env, "partitions"), partitionsArray); - Nan::Set(returnArray, topicIndex++, topic_object); + (returnArray).Set(topicIndex++, topic_object); } return returnArray; @@ -1475,7 +1474,7 @@ v8::Local FromDescribeTopicsResult( /** * @brief Converts a rd_kafka_ListOffsets_result_t* into a v8 Array. */ -v8::Local FromListOffsetsResult( +Napi::Array FromListOffsetsResult( const rd_kafka_ListOffsets_result_t* result) { /* Return object type: [{ @@ -1491,7 +1490,7 @@ v8::Local FromListOffsetsResult( const rd_kafka_ListOffsetsResultInfo_t** results = rd_kafka_ListOffsets_result_infos(result, &result_cnt); - v8::Local resultArray = Nan::New(); + Napi::Array resultArray = Napi::Array::New(env); int partitionIndex = 0; for (i = 0; i < result_cnt; i++) { @@ -1500,31 +1499,31 @@ v8::Local FromListOffsetsResult( int64_t timestamp = rd_kafka_ListOffsetsResultInfo_timestamp(results[i]); // Create the ListOffsetsResult object - v8::Local partition_object = Nan::New(); + Napi::Object partition_object = Napi::Object::New(env); // Set topic, partition, offset, error and timestamp - Nan::Set(partition_object, Nan::New("topic").ToLocalChecked(), - Nan::New(partition->topic).ToLocalChecked()); - Nan::Set(partition_object, Nan::New("partition").ToLocalChecked(), - Nan::New(partition->partition)); - Nan::Set(partition_object, Nan::New("offset").ToLocalChecked(), - Nan::New(partition->offset)); + (partition_object).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, partition->topic)); + (partition_object).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, partition->partition)); + (partition_object).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, partition->offset)); if (partition->err != RD_KAFKA_RESP_ERR_NO_ERROR) { RdKafka::ErrorCode code = static_cast(partition->err); - Nan::Set(partition_object, Nan::New("error").ToLocalChecked(), + (partition_object).Set(Napi::String::New(env, "error"), RdKafkaError(code, rd_kafka_err2str(partition->err))); } // Set leaderEpoch (if available) int32_t leader_epoch = rd_kafka_topic_partition_get_leader_epoch(partition); if (leader_epoch >= 0) { - Nan::Set(partition_object, Nan::New("leaderEpoch").ToLocalChecked(), - Nan::New(leader_epoch)); + (partition_object).Set(Napi::String::New(env, "leaderEpoch"), + Napi::Number::New(env, leader_epoch)); } - Nan::Set(partition_object, Nan::New("timestamp").ToLocalChecked(), - Nan::New(timestamp)); + (partition_object).Set(Napi::String::New(env, "timestamp"), + Napi::Number::New(env, timestamp)); - Nan::Set(resultArray, partitionIndex++, partition_object); + (resultArray).Set(partitionIndex++, partition_object); } return resultArray; @@ -1535,8 +1534,8 @@ v8::Local FromListOffsetsResult( } // namespace 
Conversion
 
 namespace Util {
-  std::string FromV8String(v8::Local<v8::String> val) {
-    Nan::Utf8String keyUTF8(val);
-    return std::string(*keyUTF8);
+  std::string FromV8String(Napi::String val) {
+    return val.Utf8Value();
   }
 }  // Namespace Util
diff --git a/src/common.h b/src/common.h
index 121a5cda..0a393f4e 100644
--- a/src/common.h
+++ b/src/common.h
@@ -11,7 +11,8 @@
 #ifndef SRC_COMMON_H_
 #define SRC_COMMON_H_
 
-#include <nan.h>
+#include <napi.h>
+#include <uv.h>
 
 #include <string>
 #include <vector>
@@ -31,16 +32,16 @@ namespace NodeKafka {
 
 void Log(std::string);
 
-template<typename T> T GetParameter(v8::Local<v8::Object>, std::string, T);
+template<typename T> T GetParameter(Napi::Object, std::string, T);
 template<> std::string GetParameter<std::string>(
-  v8::Local<v8::Object>, std::string, std::string);
+  Napi::Object, std::string, std::string);
 template<> std::vector<std::string> GetParameter<std::vector<std::string> >(
-  v8::Local<v8::Object>, std::string, std::vector<std::string>);
-template<> v8::Local<v8::Array> GetParameter<v8::Local<v8::Array> >(
-  v8::Local<v8::Object>, std::string, v8::Local<v8::Array>);
+  Napi::Object, std::string, std::vector<std::string>);
+template<> Napi::Array GetParameter<Napi::Array>(
+  Napi::Object, std::string, Napi::Array);
 // template int GetParameter<int>(v8::Local<v8::Object>, std::string, int);
-std::vector<std::string> v8ArrayToStringVector(v8::Local<v8::Array>);
-std::list<std::string> v8ArrayToStringList(v8::Local<v8::Array>);
+std::vector<std::string> v8ArrayToStringVector(Napi::Array);
+std::list<std::string> v8ArrayToStringList(Napi::Array);
 
 class scoped_mutex_lock {
  public:
@@ -96,91 +97,91 @@ class scoped_shared_read_lock {
 
 namespace Conversion {
 
 namespace Util {
-std::vector<std::string> ToStringVector(v8::Local<v8::Array>);
-v8::Local<v8::Array> ToV8Array(std::vector<std::string>);
-v8::Local<v8::Array> ToV8Array(const rd_kafka_error_t **error_list,
+std::vector<std::string> ToStringVector(Napi::Array);
+Napi::Array ToV8Array(std::vector<std::string>);
+Napi::Array ToV8Array(const rd_kafka_error_t **error_list,
                        size_t error_cnt);
-v8::Local<v8::Object> UuidToV8Object(const rd_kafka_Uuid_t* uuid);
-v8::Local<v8::Array> ToV8Array(const rd_kafka_AclOperation_t *, size_t);
+Napi::Object UuidToV8Object(const rd_kafka_Uuid_t* uuid);
+Napi::Array ToV8Array(const rd_kafka_AclOperation_t *, size_t);
 
-v8::Local<v8::Object> ToV8Object(const rd_kafka_Node_t *);
+Napi::Object ToV8Object(const rd_kafka_Node_t *);
 }  // namespace Util
 
 namespace Admin {
 // Topics from topic object, or topic object array
-rd_kafka_NewTopic_t *FromV8TopicObject(v8::Local<v8::Object>,
+rd_kafka_NewTopic_t *FromV8TopicObject(Napi::Object,
                                        std::string &errstr);
-rd_kafka_NewTopic_t **FromV8TopicObjectArray(v8::Local<v8::Array>);
+rd_kafka_NewTopic_t **FromV8TopicObjectArray(Napi::Array);
 
 // ListGroups: request
 std::vector<rd_kafka_consumer_group_state_t> FromV8GroupStateArray(
-    v8::Local<v8::Array>);
+    Napi::Array);
 
 // ListGroups: response
-v8::Local<v8::Object> FromListConsumerGroupsResult(
+Napi::Object FromListConsumerGroupsResult(
    const rd_kafka_ListConsumerGroups_result_t *);
 
 // DescribeGroups: response
-v8::Local<v8::Object> FromMemberDescription(
+Napi::Object FromMemberDescription(
    const rd_kafka_MemberDescription_t *member);
-v8::Local<v8::Object> FromConsumerGroupDescription(
+Napi::Object FromConsumerGroupDescription(
    const rd_kafka_ConsumerGroupDescription_t *desc);
-v8::Local<v8::Object> FromDescribeConsumerGroupsResult(
+Napi::Object FromDescribeConsumerGroupsResult(
    const rd_kafka_DescribeConsumerGroups_result_t *);
 
 // DeleteGroups: Response
-v8::Local<v8::Array> FromDeleteGroupsResult(
+Napi::Array FromDeleteGroupsResult(
    const rd_kafka_DeleteGroups_result_t *);
 
 // ListConsumerGroupOffsets: Response
-v8::Local<v8::Array> FromListConsumerGroupOffsetsResult(
+Napi::Array FromListConsumerGroupOffsetsResult(
     const rd_kafka_ListConsumerGroupOffsets_result_t *result);
 
 // DeleteRecords: Response
-v8::Local<v8::Array> FromDeleteRecordsResult(
+Napi::Array FromDeleteRecordsResult(
     const rd_kafka_DeleteRecords_result_t* result);
 
 // DescribeTopics: Response
-v8::Local<v8::Array> FromDescribeTopicsResult(
+Napi::Array FromDescribeTopicsResult( const rd_kafka_DescribeTopics_result_t* result); // ListOffsets: Response -v8::Local FromListOffsetsResult( +Napi::Array FromListOffsetsResult( const rd_kafka_ListOffsets_result_t* result); } // namespace Admin namespace TopicPartition { -v8::Local ToV8Array(std::vector &); -v8::Local ToTopicPartitionV8Array( +Napi::Array ToV8Array(std::vector &); +Napi::Array ToTopicPartitionV8Array( const rd_kafka_topic_partition_list_t *, bool include_offset); -RdKafka::TopicPartition *FromV8Object(v8::Local); -std::vector FromV8Array(const v8::Local &); // NOLINT +RdKafka::TopicPartition *FromV8Object(Napi::Object); +std::vector FromV8Array(const Napi::Array &); // NOLINT rd_kafka_topic_partition_list_t *TopicPartitionv8ArrayToTopicPartitionList( - v8::Local parameter, bool include_offset); + Napi::Array parameter, bool include_offset); rd_kafka_topic_partition_list_t * TopicPartitionOffsetSpecv8ArrayToTopicPartitionList( - v8::Local parameter); + Napi::Array parameter); } // namespace TopicPartition namespace Metadata { -v8::Local ToV8Object(RdKafka::Metadata*); +Napi::Object ToV8Object(RdKafka::Metadata*); } // namespace Metadata namespace Message { -v8::Local ToV8Object(RdKafka::Message*); -v8::Local ToV8Object(RdKafka::Message*, bool, bool); +Napi::Object ToV8Object(RdKafka::Message*); +Napi::Object ToV8Object(RdKafka::Message*, bool, bool); } } // namespace Conversion namespace Util { - std::string FromV8String(v8::Local); + std::string FromV8String(Napi::String); } } // namespace NodeKafka diff --git a/src/config.cc b/src/config.cc index 5f66b2d8..d3411751 100644 --- a/src/config.cc +++ b/src/config.cc @@ -13,11 +13,10 @@ #include #include -using Nan::MaybeLocal; -using Nan::Maybe; -using v8::Local; -using v8::String; -using v8::Object; +using Napi::MaybeLocal; +using Napi::Maybe; +using Napi::String; +using Napi::Object; using std::cout; using std::endl; @@ -34,23 +33,23 @@ void Conf::DumpConfig(std::list *dump) { std::cout << std::endl; } -Conf * Conf::create(RdKafka::Conf::ConfType type, v8::Local object, std::string &errstr) { // NOLINT - v8::Local context = Nan::GetCurrentContext(); +Conf * Conf::create(RdKafka::Conf::ConfType type, Napi::Object object, std::string &errstr) { // NOLINT + v8::Local context = Napi::GetCurrentContext(); Conf* rdconf = static_cast(RdKafka::Conf::create(type)); v8::MaybeLocal _property_names = object->GetOwnPropertyNames( - Nan::GetCurrentContext()); - v8::Local property_names = _property_names.ToLocalChecked(); + Napi::GetCurrentContext()); + Napi::Array property_names = _property_names; for (unsigned int i = 0; i < property_names->Length(); ++i) { std::string string_value; std::string string_key; - v8::Local key = Nan::Get(property_names, i).ToLocalChecked(); - v8::Local value = Nan::Get(object, key).ToLocalChecked(); + Napi::Value key = (property_names).Get(i); + Napi::Value value = (object).Get(key); - if (key->IsString()) { - Nan::Utf8String utf8_key(key); + if (key.IsString()) { + std::string utf8_key = key.As(); string_key = std::string(*utf8_key); } else { continue; @@ -58,21 +57,21 @@ Conf * Conf::create(RdKafka::Conf::ConfType type, v8::Local object, if (!value->IsFunction()) { #if NODE_MAJOR_VERSION > 6 - if (value->IsInt32()) { + if (value.IsNumber()) { string_value = std::to_string( value->Int32Value(context).ToChecked()); } else if (value->IsUint32()) { string_value = std::to_string( value->Uint32Value(context).ToChecked()); } else if (value->IsBoolean()) { - const bool v = Nan::To(value).ToChecked(); + const 
diff --git a/src/connection.cc b/src/connection.cc
index 189c10f1..f03b323c 100644
--- a/src/connection.cc
+++ b/src/connection.cc
@@ -352,7 +352,7 @@ Baton Connection::SetOAuthBearerTokenFailure(const std::string& errstr) {
 }

 void Connection::ConfigureCallback(
-  const std::string &string_key, const v8::Local<v8::Function> &cb, bool add) {
+  const std::string &string_key, const Napi::Function &cb, bool add) {
   if (string_key.compare("event_cb") == 0) {
     if (add) {
       this->m_event_cb.dispatcher.AddCallback(cb);
@@ -364,134 +364,136 @@ void Connection::ConfigureCallback(

 // NAN METHODS

-NAN_METHOD(Connection::NodeGetMetadata) {
-  Nan::HandleScope scope;
+Napi::Value Connection::NodeGetMetadata(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  Connection* obj = ObjectWrap::Unwrap<Connection>(info.This());
+  Connection* obj =
+    Napi::ObjectWrap<Connection>::Unwrap(info.This().As<Napi::Object>());

-  v8::Local<v8::Object> config;
-  if (info[0]->IsObject()) {
-    config = info[0].As<v8::Object>();
+  Napi::Object config;
+  if (info[0].IsObject()) {
+    config = info[0].As<Napi::Object>();
   } else {
-    config = Nan::New<v8::Object>();
+    config = Napi::Object::New(env);
   }

-  if (!info[1]->IsFunction()) {
-    Nan::ThrowError("Second parameter must be a callback");
-    return;
+  if (!info[1].IsFunction()) {
+    Napi::Error::New(env, "Second parameter must be a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  v8::Local<v8::Function> cb = info[1].As<v8::Function>();
+  Napi::Function cb = info[1].As<Napi::Function>();

   std::string topic = GetParameter<std::string>(config, "topic", "");
   bool allTopics = GetParameter<bool>(config, "allTopics", true);
   int timeout_ms = GetParameter<int64_t>(config, "timeout", 30000);

-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::FunctionReference *callback =
+    new Napi::FunctionReference(Napi::Persistent(cb));

-  Nan::AsyncQueueWorker(new Workers::ConnectionMetadata(
-    callback, obj, topic, timeout_ms, allTopics));
+  (new Workers::ConnectionMetadata(
+    callback, obj, topic, timeout_ms, allTopics))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }
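Reviewer note: `Napi::AsyncQueueWorker(...)`, as emitted by the migration script throughout this patch, does not exist in node-addon-api. The equivalent is a `Napi::AsyncWorker` subclass that is scheduled with `Queue()` and deletes itself after `OnOK()`/`OnError()`. A minimal sketch; the worker name and payload are illustrative, not this repo's actual worker classes:

    #include <napi.h>
    #include <string>

    // Do blocking work in Execute() (off the JS thread) and surface the
    // result in OnOK(); Queue() schedules the worker, which frees itself
    // once a completion handler has run.
    class MetadataWorker : public Napi::AsyncWorker {
     public:
      MetadataWorker(Napi::Function& callback, std::string topic)
          : Napi::AsyncWorker(callback), m_topic(std::move(topic)) {}

      void Execute() override {
        // A blocking librdkafka call would go here; call SetError() on
        // failure to route into OnError().
      }

      void OnOK() override {
        Napi::Env env = Env();
        Callback().Call({env.Null(), Napi::String::New(env, m_topic)});
      }

     private:
      std::string m_topic;
    };

    // Usage inside a binding method:
    //   (new MetadataWorker(cb, topic))->Queue();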
-NAN_METHOD(Connection::NodeOffsetsForTimes) {
-  Nan::HandleScope scope;
+Napi::Value Connection::NodeOffsetsForTimes(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 3 || !info[0]->IsArray()) {
+  if (info.Length() < 3 || !info[0].IsArray()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify an array of topic partitions");
+    Napi::Error::New(env, "Need to specify an array of topic partitions").ThrowAsJavaScriptException();
+    return env.Null();
   }

   std::vector<RdKafka::TopicPartition*> toppars =
-    Conversion::TopicPartition::FromV8Array(info[0].As<v8::Array>());
+    Conversion::TopicPartition::FromV8Array(info[0].As<Napi::Array>());

   int timeout_ms;
-  Nan::Maybe<uint32_t> maybeTimeout =
-    Nan::To<uint32_t>(info[1].As<v8::Number>());
-
-  if (maybeTimeout.IsNothing()) {
+  if (!info[1].IsNumber()) {
     timeout_ms = 1000;
   } else {
-    timeout_ms = static_cast<int>(maybeTimeout.FromJust());
+    timeout_ms = static_cast<int>(info[1].As<Napi::Number>().Uint32Value());
   }

-  v8::Local<v8::Function> cb = info[2].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[2].As<Napi::Function>();
+  Napi::FunctionReference *callback =
+    new Napi::FunctionReference(Napi::Persistent(cb));

-  Connection* handle = ObjectWrap::Unwrap<Connection>(info.This());
+  Connection* handle =
+    Napi::ObjectWrap<Connection>::Unwrap(info.This().As<Napi::Object>());

-  Nan::AsyncQueueWorker(
-    new Workers::Handle::OffsetsForTimes(callback, handle,
-      toppars, timeout_ms));
+  (new Workers::Handle::OffsetsForTimes(callback, handle,
+    toppars, timeout_ms))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(Connection::NodeQueryWatermarkOffsets) {
-  Nan::HandleScope scope;
+Napi::Value Connection::NodeQueryWatermarkOffsets(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  Connection* obj = ObjectWrap::Unwrap<Connection>(info.This());
+  Connection* obj =
+    Napi::ObjectWrap<Connection>::Unwrap(info.This().As<Napi::Object>());

-  if (!info[0]->IsString()) {
-    Nan::ThrowError("1st parameter must be a topic string");
-    return;
+  if (!info[0].IsString()) {
+    Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  if (!info[1]->IsNumber()) {
-    Nan::ThrowError("2nd parameter must be a partition number");
-    return;
+  if (!info[1].IsNumber()) {
+    Napi::Error::New(env, "2nd parameter must be a partition number").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  if (!info[2]->IsNumber()) {
-    Nan::ThrowError("3rd parameter must be a number of milliseconds");
-    return;
+  if (!info[2].IsNumber()) {
+    Napi::Error::New(env, "3rd parameter must be a number of milliseconds").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  if (!info[3]->IsFunction()) {
-    Nan::ThrowError("4th parameter must be a callback");
-    return;
+  if (!info[3].IsFunction()) {
+    Napi::Error::New(env, "4th parameter must be a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

   // Get string pointer for the topic name
-  Nan::Utf8String topicUTF8(Nan::To<v8::String>(info[0]).ToLocalChecked());
   // The first parameter is the topic
-  std::string topic_name(*topicUTF8);
+  std::string topic_name = info[0].As<Napi::String>().Utf8Value();

   // Second parameter is the partition
-  int32_t partition = Nan::To<int32_t>(info[1]).FromJust();
+  int32_t partition = info[1].As<Napi::Number>().Int32Value();

   // Third parameter is the timeout
-  int timeout_ms = Nan::To<int>(info[2]).FromJust();
+  int timeout_ms = info[2].As<Napi::Number>().Int32Value();

   // Fourth parameter is the callback
-  v8::Local<v8::Function> cb = info[3].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[3].As<Napi::Function>();
+  Napi::FunctionReference *callback =
+    new Napi::FunctionReference(Napi::Persistent(cb));

-  Nan::AsyncQueueWorker(new Workers::ConnectionQueryWatermarkOffsets(
-    callback, obj, topic_name, partition, timeout_ms));
+  (new Workers::ConnectionQueryWatermarkOffsets(
+    callback, obj, topic_name, partition, timeout_ms))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }
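Reviewer note: `Nan::To<uint32_t>` returned a Maybe that signalled failed coercion, but node-addon-api's Maybe-returning variants are opt-in (`NODE_ADDON_API_ENABLE_MAYBE`), so the usual replacement is to type-check first and then convert, as the fixed-up hunks above now do. As a sketch, the repeated "timeout with default" pattern could be factored like this (helper name is illustrative):

    #include <napi.h>

    // Check the argument's type, then convert; fall back to a default
    // instead of consulting a Maybe.
    static int GetTimeoutMs(const Napi::CallbackInfo& info, size_t index,
                            int default_ms) {
      if (index >= info.Length() || !info[index].IsNumber()) {
        return default_ms;
      }
      return static_cast<int>(info[index].As<Napi::Number>().Uint32Value());
    }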
-NAN_METHOD(Connection::NodeSetSaslCredentials) {
-  if (!info[0]->IsString()) {
-    Nan::ThrowError("1st parameter must be a username string");
-    return;
+Napi::Value Connection::NodeSetSaslCredentials(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  if (!info[0].IsString()) {
+    Napi::Error::New(env, "1st parameter must be a username string").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  if (!info[1]->IsString()) {
-    Nan::ThrowError("2nd parameter must be a password string");
-    return;
+  if (!info[1].IsString()) {
+    Napi::Error::New(env, "2nd parameter must be a password string").ThrowAsJavaScriptException();
+    return env.Null();
   }

   // Get string pointer for the username
-  Nan::Utf8String usernameUTF8(Nan::To<v8::String>(info[0]).ToLocalChecked());
   // The first parameter is the username
-  std::string username(*usernameUTF8);
+  std::string username = info[0].As<Napi::String>().Utf8Value();

   // Get string pointer for the password
-  Nan::Utf8String passwordUTF8(Nan::To<v8::String>(info[1]).ToLocalChecked());
   // The first parameter is the password
-  std::string password(*passwordUTF8);
+  std::string password = info[1].As<Napi::String>().Utf8Value();

@@ -499,44 +501,46 @@ NAN_METHOD(Connection::NodeSetSaslCredentials) {
   Baton b = obj->SetSaslCredentials(username, password);

   if (b.err() != RdKafka::ERR_NO_ERROR) {
-    v8::Local<v8::Value> errorObject = b.ToObject();
-    return Nan::ThrowError(errorObject);
+    Napi::Value errorObject = b.ToObject();
+    Napi::Error(env, errorObject).ThrowAsJavaScriptException();
+    return env.Null();
   }

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

 // Node methods
-NAN_METHOD(Connection::NodeConfigureCallbacks) {
-  Nan::HandleScope scope;
+Napi::Value Connection::NodeConfigureCallbacks(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

   if (info.Length() < 2 ||
-    !info[0]->IsBoolean() ||
-    !info[1]->IsObject()) {
+    !info[0].IsBoolean() ||
+    !info[1].IsObject()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify a callbacks object");
+    Napi::Error::New(env, "Need to specify a callbacks object").ThrowAsJavaScriptException();
+    return env.Null();
   }
-  v8::Local<v8::Context> context = Nan::GetCurrentContext();
-  Connection* obj = ObjectWrap::Unwrap<Connection>(info.This());
+  Connection* obj =
+    Napi::ObjectWrap<Connection>::Unwrap(info.This().As<Napi::Object>());

-  const bool add = Nan::To<bool>(info[0]).ToChecked();
-  v8::Local<v8::Object> configs_object =
-    info[1]->ToObject(context).ToLocalChecked();
-  v8::Local<v8::Array> configs_property_names =
-    configs_object->GetOwnPropertyNames(context).ToLocalChecked();
+  const bool add = info[0].As<Napi::Boolean>().Value();
+  Napi::Object configs_object = info[1].ToObject();
+  Napi::Array configs_property_names = configs_object.GetPropertyNames();

-  for (unsigned int j = 0; j < configs_property_names->Length(); ++j) {
+  for (unsigned int j = 0; j < configs_property_names.Length(); ++j) {
     std::string configs_string_key;

-    v8::Local<v8::Value> configs_key =
-      Nan::Get(configs_property_names, j).ToLocalChecked();
-    v8::Local<v8::Value> configs_value =
-      Nan::Get(configs_object, configs_key).ToLocalChecked();
+    Napi::Value configs_key = configs_property_names.Get(j);
+    Napi::Value configs_value = configs_object.Get(configs_key);

     int config_type = 0;
-    if (configs_value->IsObject() && configs_key->IsString()) {
-      Nan::Utf8String configs_utf8_key(configs_key);
-      configs_string_key = std::string(*configs_utf8_key);
+    if (configs_value.IsObject() && configs_key.IsString()) {
+      configs_string_key = configs_key.As<Napi::String>().Utf8Value();
       if (configs_string_key.compare("global") == 0) {
        config_type = 1;
@@ -551,38 +555,40 @@ NAN_METHOD(Connection::NodeConfigureCallbacks) {
      continue;
     }

-    v8::Local<v8::Object> object =
-      configs_value->ToObject(context).ToLocalChecked();
-    v8::Local<v8::Array> property_names =
-      object->GetOwnPropertyNames(context).ToLocalChecked();
+    Napi::Object object = configs_value.ToObject();
+    Napi::Array property_names = object.GetPropertyNames();

-    for (unsigned int i = 0; i < property_names->Length(); ++i) {
+    for (unsigned int i = 0; i < property_names.Length(); ++i) {
       std::string errstr;
       std::string string_key;

-      v8::Local<v8::Value> key = Nan::Get(property_names, i).ToLocalChecked();
-      v8::Local<v8::Value> value = Nan::Get(object, key).ToLocalChecked();
+      Napi::Value key = property_names.Get(i);
+      Napi::Value value = object.Get(key);

-      if (key->IsString()) {
-        Nan::Utf8String utf8_key(key);
-        string_key = std::string(*utf8_key);
+      if (key.IsString()) {
+        string_key = key.As<Napi::String>().Utf8Value();
       } else {
         continue;
       }

-      if (value->IsFunction()) {
-        v8::Local<v8::Function> cb = value.As<v8::Function>();
+      if (value.IsFunction()) {
+        Napi::Function cb = value.As<Napi::Function>();
         switch (config_type) {
           case 1:
             obj->m_gconfig->ConfigureCallback(string_key, cb, add, errstr);
             if (!errstr.empty()) {
-              return Nan::ThrowError(errstr.c_str());
+              Napi::Error::New(env, errstr).ThrowAsJavaScriptException();
+              return env.Null();
             }
             break;
           case 2:
             obj->m_tconfig->ConfigureCallback(string_key, cb, add, errstr);
             if (!errstr.empty()) {
-              return Nan::ThrowError(errstr.c_str());
+              Napi::Error::New(env, errstr).ThrowAsJavaScriptException();
+              return env.Null();
             }
             break;
           case 3:
@@ -593,46 +599,46 @@ NAN_METHOD(Connection::NodeConfigureCallbacks) {
     }
   }

-  info.GetReturnValue().Set(Nan::True());
+  return Napi::Boolean::New(env, true);
 }

-NAN_METHOD(Connection::NodeSetOAuthBearerToken) {
-  if (!info[0]->IsString()) {
-    Nan::ThrowError("1st parameter must be a token string");
-    return;
+Napi::Value Connection::NodeSetOAuthBearerToken(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  if (!info[0].IsString()) {
+    Napi::Error::New(env, "1st parameter must be a token string").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  if (!info[1]->IsNumber()) {
-    Nan::ThrowError("2nd parameter must be a lifetime_ms number");
-    return;
+  if (!info[1].IsNumber()) {
+    Napi::Error::New(env, "2nd parameter must be a lifetime_ms number").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  if (!info[2]->IsString()) {
-    Nan::ThrowError("3rd parameter must be a principal_name string");
-    return;
+  if (!info[2].IsString()) {
+    Napi::Error::New(env, "3rd parameter must be a principal_name string").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  if (!info[3]->IsNullOrUndefined() && !info[3]->IsArray()) {
-    Nan::ThrowError("4th parameter must be an extensions array or null");
-    return;
+  if (!(info[3].IsNull() || info[3].IsUndefined()) && !info[3].IsArray()) {
+    Napi::Error::New(env, "4th parameter must be an extensions array or null").ThrowAsJavaScriptException();
+    return env.Null();
   }

   // Get string pointer for the token
-  Nan::Utf8String tokenUtf8(Nan::To<v8::String>(info[0]).ToLocalChecked());
-  std::string token(*tokenUtf8);
+  std::string token = info[0].As<Napi::String>().Utf8Value();

   // Get the lifetime_ms
-  int64_t lifetime_ms = Nan::To<int64_t>(info[1]).FromJust();
+  int64_t lifetime_ms = info[1].As<Napi::Number>().Int64Value();

   // Get string pointer for the principal_name
-  Nan::Utf8String principal_nameUtf8(
-    Nan::To<v8::String>(info[2]).ToLocalChecked());
-  std::string principal_name(*principal_nameUtf8);
+  std::string principal_name = info[2].As<Napi::String>().Utf8Value();

   // Get the extensions (if any)
   std::list<std::string> extensions;
-  if (!info[3]->IsNullOrUndefined()) {
-    v8::Local<v8::Array> extensionsArray = info[3].As<v8::Array>();
+  if (!(info[3].IsNull() || info[3].IsUndefined())) {
+    Napi::Array extensionsArray = info[3].As<Napi::Array>();
     extensions = v8ArrayToStringList(extensionsArray);
   }

@@ -641,38 +647,40 @@ NAN_METHOD(Connection::NodeSetOAuthBearerToken) {
   Baton b =
     obj->SetOAuthBearerToken(token, lifetime_ms, principal_name, extensions);

   if (b.err() != RdKafka::ERR_NO_ERROR) {
-    v8::Local<v8::Value> errorObject = b.ToObject();
-    return Nan::ThrowError(errorObject);
+    Napi::Value errorObject = b.ToObject();
+    Napi::Error(env, errorObject).ThrowAsJavaScriptException();
+    return env.Null();
   }

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }
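Reviewer note: `new Napi::FunctionReference(cb)` as produced by the script does not compile; a `FunctionReference` is obtained from `Napi::Persistent()`, which is the node-addon-api counterpart of `Nan::Callback`'s strong handle, and is what the fixed hunks above use. Sketch of a hypothetical helper:

    #include <napi.h>

    // Take a strong, heap-allocated reference to a JS function so it
    // survives until an async worker or dispatcher is done with it.
    static Napi::FunctionReference* RetainCallback(Napi::Function cb) {
      Napi::FunctionReference* ref =
          new Napi::FunctionReference(Napi::Persistent(cb));
      // Callers must eventually `delete ref;` (Unref happens in the dtor).
      return ref;
    }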
-NAN_METHOD(Connection::NodeSetOAuthBearerTokenFailure) {
-  if (!info[0]->IsString()) {
-    Nan::ThrowError("1st parameter must be an error string");
-    return;
+Napi::Value Connection::NodeSetOAuthBearerTokenFailure(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  if (!info[0].IsString()) {
+    Napi::Error::New(env, "1st parameter must be an error string").ThrowAsJavaScriptException();
+    return env.Null();
   }

   // Get string pointer for the error string
-  Nan::Utf8String errstrUtf8(Nan::To<v8::String>(info[0]).ToLocalChecked());
-  std::string errstr(*errstrUtf8);
+  std::string errstr = info[0].As<Napi::String>().Utf8Value();

-  Connection* obj = ObjectWrap::Unwrap<Connection>(info.This());
+  Connection* obj =
+    Napi::ObjectWrap<Connection>::Unwrap(info.This().As<Napi::Object>());

   Baton b = obj->SetOAuthBearerTokenFailure(errstr);
   if (b.err() != RdKafka::ERR_NO_ERROR) {
-    v8::Local<v8::Value> errorObject = b.ToObject();
-    return Nan::ThrowError(errorObject);
+    Napi::Value errorObject = b.ToObject();
+    Napi::Error(env, errorObject).ThrowAsJavaScriptException();
+    return env.Null();
   }

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(Connection::NodeName) {
-  Connection* obj = ObjectWrap::Unwrap<Connection>(info.This());
+Napi::Value Connection::NodeName(const Napi::CallbackInfo& info) {
+  Connection* obj =
+    Napi::ObjectWrap<Connection>::Unwrap(info.This().As<Napi::Object>());
   std::string name = obj->Name();

-  info.GetReturnValue().Set(Nan::New(name).ToLocalChecked());
+  return Napi::String::New(info.Env(), name);
 }

 }  // namespace NodeKafka
diff --git a/src/connection.h b/src/connection.h
index 532468fe..63442ee4 100644
--- a/src/connection.h
+++ b/src/connection.h
@@ -11,7 +11,8 @@
 #ifndef SRC_CONNECTION_H_
 #define SRC_CONNECTION_H_

-#include <nan.h>
+#include <napi.h>
+#include <uv.h>
 #include <string>
 #include <vector>
 #include <list>

@@ -46,7 +47,7 @@ namespace NodeKafka {
 * @sa NodeKafka::Client
 */

-class Connection : public Nan::ObjectWrap {
+class Connection : public Napi::ObjectWrap<Connection> {
 public:
   bool IsConnected() const;
   bool IsClosing() const;
@@ -73,7 +74,7 @@ class Connection : public Napi::ObjectWrap<Connection> {
   virtual void DeactivateDispatchers() = 0;

   virtual void ConfigureCallback(
-    const std::string &string_key, const v8::Local<v8::Function> &cb, bool add);
+    const std::string &string_key, const Napi::Function &cb, bool add);

   std::string Name() const;

@@ -82,8 +83,8 @@ class Connection : public Napi::ObjectWrap<Connection> {
   explicit Connection(Connection *);
   ~Connection();

-  static Nan::Persistent<v8::Function> constructor;
-  static void New(const Nan::FunctionCallbackInfo<v8::Value>& info);
+  static Napi::FunctionReference constructor;
+  static void New(const Napi::CallbackInfo& info);

   static Baton rdkafkaErrorToBaton(RdKafka::Error* error);
   Baton setupSaslOAuthBearerConfig();
@@ -100,14 +101,14 @@ class Connection : public Napi::ObjectWrap<Connection> {

   RdKafka::Handle* m_client;

-  static NAN_METHOD(NodeConfigureCallbacks);
-  static NAN_METHOD(NodeGetMetadata);
-  static NAN_METHOD(NodeQueryWatermarkOffsets);
-  static NAN_METHOD(NodeOffsetsForTimes);
-  static NAN_METHOD(NodeSetSaslCredentials);
-  static NAN_METHOD(NodeSetOAuthBearerToken);
-  static NAN_METHOD(NodeSetOAuthBearerTokenFailure);
-  static NAN_METHOD(NodeName);
+  static Napi::Value NodeConfigureCallbacks(const Napi::CallbackInfo& info);
+  static Napi::Value NodeGetMetadata(const Napi::CallbackInfo& info);
+  static Napi::Value NodeQueryWatermarkOffsets(const Napi::CallbackInfo& info);
+  static Napi::Value NodeOffsetsForTimes(const Napi::CallbackInfo& info);
+  static Napi::Value NodeSetSaslCredentials(const Napi::CallbackInfo& info);
+  static
Napi::Value NodeSetOAuthBearerToken(const Napi::CallbackInfo& info); + static Napi::Value NodeSetOAuthBearerTokenFailure(const Napi::CallbackInfo& info); + static Napi::Value NodeName(const Napi::CallbackInfo& info); }; } // namespace NodeKafka diff --git a/src/errors.cc b/src/errors.cc index 9d1d9675..d0a68efe 100644 --- a/src/errors.cc +++ b/src/errors.cc @@ -14,36 +14,36 @@ namespace NodeKafka { -v8::Local RdKafkaError(const RdKafka::ErrorCode &err, +Napi::Object RdKafkaError(const RdKafka::ErrorCode &err, const std::string &errstr) { int code = static_cast(err); - v8::Local ret = Nan::New(); + Napi::Object ret = Napi::Object::New(env); - Nan::Set(ret, Nan::New("message").ToLocalChecked(), - Nan::New(errstr).ToLocalChecked()); - Nan::Set(ret, Nan::New("code").ToLocalChecked(), - Nan::New(code)); + (ret).Set(Napi::String::New(env, "message"), + Napi::String::New(env, errstr)); + (ret).Set(Napi::String::New(env, "code"), + Napi::Number::New(env, code)); return ret; } -v8::Local RdKafkaError(const RdKafka::ErrorCode &err) { +Napi::Object RdKafkaError(const RdKafka::ErrorCode &err) { std::string errstr = RdKafka::err2str(err); return RdKafkaError(err, errstr); } -v8::Local RdKafkaError( +Napi::Object RdKafkaError( const RdKafka::ErrorCode &err, std::string errstr, bool isFatal, bool isRetriable, bool isTxnRequiresAbort) { - v8::Local ret = RdKafkaError(err, errstr); + Napi::Object ret = RdKafkaError(err, errstr); - Nan::Set(ret, Nan::New("isFatal").ToLocalChecked(), - Nan::New(isFatal)); - Nan::Set(ret, Nan::New("isRetriable").ToLocalChecked(), - Nan::New(isRetriable)); - Nan::Set(ret, Nan::New("isTxnRequiresAbort").ToLocalChecked(), - Nan::New(isTxnRequiresAbort)); + (ret).Set(Napi::String::New(env, "isFatal"), + Napi::Boolean::New(env, isFatal)); + (ret).Set(Napi::String::New(env, "isRetriable"), + Napi::Boolean::New(env, isRetriable)); + (ret).Set(Napi::String::New(env, "isTxnRequiresAbort"), + Napi::Boolean::New(env, isTxnRequiresAbort)); return ret; } @@ -92,7 +92,7 @@ Baton Baton::BatonFromErrorAndDestroy(RdKafka::Error *error) { return Baton(err, errstr); } -v8::Local Baton::ToObject() { +Napi::Object Baton::ToObject() { if (m_errstr.empty()) { return RdKafkaError(m_err); } else { @@ -100,7 +100,7 @@ v8::Local Baton::ToObject() { } } -v8::Local Baton::ToTxnObject() { +Napi::Object Baton::ToTxnObject() { return RdKafkaError(m_err, m_errstr, m_isFatal, m_isRetriable, m_isTxnRequiresAbort); // NOLINT } diff --git a/src/errors.h b/src/errors.h index 248d26ad..4538f844 100644 --- a/src/errors.h +++ b/src/errors.h @@ -11,7 +11,8 @@ #ifndef SRC_ERRORS_H_ #define SRC_ERRORS_H_ -#include +#include +#include #include #include @@ -39,8 +40,8 @@ class Baton { RdKafka::ErrorCode err(); std::string errstr(); - v8::Local ToObject(); - v8::Local ToTxnObject(); + Napi::Object ToObject(); + Napi::Object ToTxnObject(); private: void* m_data; @@ -51,8 +52,8 @@ class Baton { bool m_isTxnRequiresAbort; }; -v8::Local RdKafkaError(const RdKafka::ErrorCode &); -v8::Local RdKafkaError(const RdKafka::ErrorCode &, +Napi::Object RdKafkaError(const RdKafka::ErrorCode &); +Napi::Object RdKafkaError(const RdKafka::ErrorCode &, const std::string &); } // namespace NodeKafka diff --git a/src/kafka-consumer.cc b/src/kafka-consumer.cc index 4bc778d4..0a2c7650 100644 --- a/src/kafka-consumer.cc +++ b/src/kafka-consumer.cc @@ -15,7 +15,7 @@ #include "src/kafka-consumer.h" #include "src/workers.h" -using Nan::FunctionCallbackInfo; +using Napi::FunctionCallbackInfo; namespace NodeKafka { @@ -129,7 +129,7 @@ void 
KafkaConsumer::DeactivateDispatchers() { } void KafkaConsumer::ConfigureCallback(const std::string& string_key, - const v8::Local& cb, + const Napi::Function& cb, bool add) { if (string_key.compare("queue_non_empty_cb") == 0) { if (add) { @@ -522,14 +522,14 @@ std::string KafkaConsumer::RebalanceProtocol() { return m_consumer->rebalance_protocol(); } -Nan::Persistent KafkaConsumer::constructor; +Napi::FunctionReference KafkaConsumer::constructor; -void KafkaConsumer::Init(v8::Local exports) { - Nan::HandleScope scope; +void KafkaConsumer::Init(Napi::Object exports) { + Napi::HandleScope scope(env); + + Napi::FunctionReference tpl = Napi::Function::New(env, New); + tpl->SetClassName(Napi::String::New(env, "KafkaConsumer")); - v8::Local tpl = Nan::New(New); - tpl->SetClassName(Nan::New("KafkaConsumer").ToLocalChecked()); - tpl->InstanceTemplate()->SetInternalFieldCount(1); /* * Lifecycle events inherited from NodeKafka::Connection @@ -537,97 +537,103 @@ void KafkaConsumer::Init(v8::Local exports) { * @sa NodeKafka::Connection */ - Nan::SetPrototypeMethod(tpl, "configureCallbacks", NodeConfigureCallbacks); + InstanceMethod("configureCallbacks", &NodeConfigureCallbacks), /* * @brief Methods to do with establishing state */ - Nan::SetPrototypeMethod(tpl, "connect", NodeConnect); - Nan::SetPrototypeMethod(tpl, "disconnect", NodeDisconnect); - Nan::SetPrototypeMethod(tpl, "getMetadata", NodeGetMetadata); - Nan::SetPrototypeMethod(tpl, "queryWatermarkOffsets", NodeQueryWatermarkOffsets); // NOLINT - Nan::SetPrototypeMethod(tpl, "offsetsForTimes", NodeOffsetsForTimes); - Nan::SetPrototypeMethod(tpl, "getWatermarkOffsets", NodeGetWatermarkOffsets); - Nan::SetPrototypeMethod(tpl, "setSaslCredentials", NodeSetSaslCredentials); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerToken", NodeSetOAuthBearerToken); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", + InstanceMethod("connect", &NodeConnect), + InstanceMethod("disconnect", &NodeDisconnect), + InstanceMethod("getMetadata", &NodeGetMetadata), + InstanceMethod("queryWatermarkOffsets", &NodeQueryWatermarkOffsets), // NOLINT + InstanceMethod("offsetsForTimes", &NodeOffsetsForTimes), + InstanceMethod("getWatermarkOffsets", &NodeGetWatermarkOffsets), + InstanceMethod("setSaslCredentials", &NodeSetSaslCredentials), + InstanceMethod("setOAuthBearerToken", &NodeSetOAuthBearerToken), + Napi::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", NodeSetOAuthBearerTokenFailure); /* * @brief Methods exposed to do with message retrieval */ - Nan::SetPrototypeMethod(tpl, "subscription", NodeSubscription); - Nan::SetPrototypeMethod(tpl, "subscribe", NodeSubscribe); - Nan::SetPrototypeMethod(tpl, "unsubscribe", NodeUnsubscribe); - Nan::SetPrototypeMethod(tpl, "consumeLoop", NodeConsumeLoop); - Nan::SetPrototypeMethod(tpl, "consume", NodeConsume); - Nan::SetPrototypeMethod(tpl, "seek", NodeSeek); + InstanceMethod("subscription", &NodeSubscription), + InstanceMethod("subscribe", &NodeSubscribe), + InstanceMethod("unsubscribe", &NodeUnsubscribe), + InstanceMethod("consumeLoop", &NodeConsumeLoop), + InstanceMethod("consume", &NodeConsume), + InstanceMethod("seek", &NodeSeek), /** * @brief Pausing and resuming */ - Nan::SetPrototypeMethod(tpl, "pause", NodePause); - Nan::SetPrototypeMethod(tpl, "resume", NodeResume); + InstanceMethod("pause", &NodePause), + InstanceMethod("resume", &NodeResume), /* * @brief Methods to do with partition assignment / rebalancing */ - Nan::SetPrototypeMethod(tpl, "committed", NodeCommitted); - Nan::SetPrototypeMethod(tpl, 
"position", NodePosition); - Nan::SetPrototypeMethod(tpl, "assign", NodeAssign); - Nan::SetPrototypeMethod(tpl, "unassign", NodeUnassign); - Nan::SetPrototypeMethod(tpl, "incrementalAssign", NodeIncrementalAssign); - Nan::SetPrototypeMethod(tpl, "incrementalUnassign", NodeIncrementalUnassign); - Nan::SetPrototypeMethod(tpl, "assignments", NodeAssignments); - Nan::SetPrototypeMethod(tpl, "assignmentLost", NodeAssignmentLost); - Nan::SetPrototypeMethod(tpl, "rebalanceProtocol", NodeRebalanceProtocol); - - Nan::SetPrototypeMethod(tpl, "commit", NodeCommit); - Nan::SetPrototypeMethod(tpl, "commitSync", NodeCommitSync); - Nan::SetPrototypeMethod(tpl, "commitCb", NodeCommitCb); - Nan::SetPrototypeMethod(tpl, "offsetsStore", NodeOffsetsStore); - Nan::SetPrototypeMethod(tpl, "offsetsStoreSingle", NodeOffsetsStoreSingle); - - constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext())) - .ToLocalChecked()); - Nan::Set(exports, Nan::New("KafkaConsumer").ToLocalChecked(), - (tpl->GetFunction(Nan::GetCurrentContext())).ToLocalChecked()); + InstanceMethod("committed", &NodeCommitted), + InstanceMethod("position", &NodePosition), + InstanceMethod("assign", &NodeAssign), + InstanceMethod("unassign", &NodeUnassign), + InstanceMethod("incrementalAssign", &NodeIncrementalAssign), + InstanceMethod("incrementalUnassign", &NodeIncrementalUnassign), + InstanceMethod("assignments", &NodeAssignments), + InstanceMethod("assignmentLost", &NodeAssignmentLost), + InstanceMethod("rebalanceProtocol", &NodeRebalanceProtocol), + + InstanceMethod("commit", &NodeCommit), + InstanceMethod("commitSync", &NodeCommitSync), + InstanceMethod("commitCb", &NodeCommitCb), + InstanceMethod("offsetsStore", &NodeOffsetsStore), + InstanceMethod("offsetsStoreSingle", &NodeOffsetsStoreSingle), + + constructor.Reset((tpl->GetFunction(Napi::GetCurrentContext())) + ); + (exports).Set(Napi::String::New(env, "KafkaConsumer"), + (tpl->GetFunction(Napi::GetCurrentContext()))); } -void KafkaConsumer::New(const Nan::FunctionCallbackInfo& info) { +void KafkaConsumer::New(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); if (!info.IsConstructCall()) { - return Nan::ThrowError("non-constructor invocation not supported"); + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + return env.Null(); } if (info.Length() < 2) { - return Nan::ThrowError("You must supply global and topic configuration"); + Napi::Error::New(env, "You must supply global and topic configuration").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsObject()) { - return Nan::ThrowError("Global configuration data must be specified"); + if (!info[0].IsObject()) { + Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException(); + return env.Null(); } std::string errstr; Conf* gconfig = Conf::create(RdKafka::Conf::CONF_GLOBAL, - (info[0]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); + (info[0].ToObject(Napi::GetCurrentContext())), errstr); if (!gconfig) { - return Nan::ThrowError(errstr.c_str()); + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return env.Null(); } // If tconfig isn't set, then just let us pick properties from gconf. 
Conf* tconfig = nullptr;
-  if (info[1]->IsObject()) {
+  if (info[1].IsObject()) {
     tconfig = Conf::create(RdKafka::Conf::CONF_TOPIC,
-      (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr);
+      info[1].ToObject(), errstr);

     if (!tconfig) {
       delete gconfig;
-      return Nan::ThrowError(errstr.c_str());
+      Napi::Error::New(env, errstr).ThrowAsJavaScriptException();
+      return env.Null();
     }
   }

@@ -641,59 +647,61 @@ void KafkaConsumer::New(const Nan::FunctionCallbackInfo<v8::Value>& info) {
   // basically it sets the configuration data
   // we don't need to do that because we lazy load it

-  info.GetReturnValue().Set(info.This());
+  return info.This();
 }

-v8::Local<v8::Object> KafkaConsumer::NewInstance(v8::Local<v8::Value> arg) {
-  Nan::EscapableHandleScope scope;
+Napi::Object KafkaConsumer::NewInstance(Napi::Value arg) {
+  Napi::Env env = arg.Env();
+  Napi::EscapableHandleScope scope(env);

-  const unsigned argc = 1;
-
-  v8::Local<v8::Value> argv[argc] = { arg };
-  v8::Local<v8::Function> cons = Nan::New<v8::Function>(constructor);
-  v8::Local<v8::Object> instance =
-    Nan::NewInstance(cons, argc, argv).ToLocalChecked();
+  Napi::Function cons = constructor.Value();
+  Napi::Object instance = cons.New({ arg });

-  return scope.Escape(instance);
+  return scope.Escape(instance).As<Napi::Object>();
 }

 /* Node exposed methods */

-NAN_METHOD(KafkaConsumer::NodeCommitted) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeCommitted(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 3 || !info[0]->IsArray()) {
+  if (info.Length() < 3 || !info[0].IsArray()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify an array of topic partitions");
+    Napi::Error::New(env, "Need to specify an array of topic partitions").ThrowAsJavaScriptException();
+    return env.Null();
   }

   std::vector<RdKafka::TopicPartition*> toppars =
-    Conversion::TopicPartition::FromV8Array(info[0].As<v8::Array>());
+    Conversion::TopicPartition::FromV8Array(info[0].As<Napi::Array>());

   int timeout_ms;
-  Nan::Maybe<uint32_t> maybeTimeout =
-    Nan::To<uint32_t>(info[1].As<v8::Number>());
-
-  if (maybeTimeout.IsNothing()) {
+  if (!info[1].IsNumber()) {
     timeout_ms = 1000;
   } else {
-    timeout_ms = static_cast<int>(maybeTimeout.FromJust());
+    timeout_ms = static_cast<int>(info[1].As<Napi::Number>().Uint32Value());
   }

-  v8::Local<v8::Function> cb = info[2].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[2].As<Napi::Function>();
+  Napi::FunctionReference *callback =
+    new Napi::FunctionReference(Napi::Persistent(cb));

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  KafkaConsumer* consumer =
+    Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

-  Nan::AsyncQueueWorker(
-    new Workers::KafkaConsumerCommitted(callback, consumer,
-      toppars, timeout_ms));
+  (new Workers::KafkaConsumerCommitted(callback, consumer,
+    toppars, timeout_ms))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(KafkaConsumer::NodeSubscription) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeSubscription(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  KafkaConsumer* consumer =
+    Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

@@ -702,46 +710,47 @@ NAN_METHOD(KafkaConsumer::NodeSubscription) {
   if (b.err() != RdKafka::ErrorCode::ERR_NO_ERROR) {
     // Let the JS library throw if we need to so the error can be more rich
     int error_code = static_cast<int>(b.err());
-    return info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+    return Napi::Number::New(env, error_code);
   }

   std::vector<std::string> * topics = b.data<std::vector<std::string>*>();
-  info.GetReturnValue().Set(Conversion::Util::ToV8Array(*topics));
-  delete topics;
+  Napi::Array result = Conversion::Util::ToV8Array(*topics);
+  delete topics;
+  return result;
 }
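Reviewer note: the script preserves NAN's static-method-plus-Unwrap shape. Under node-addon-api the wrapped class usually derives from `Napi::ObjectWrap<T>` and registers instance methods via `DefineClass`, so `this` replaces the explicit unwrap. An illustrative shape, not this patch's final class layout:

    #include <napi.h>

    // Methods registered with DefineClass are instance methods, so the
    // unwrapped native pointer is simply `this`.
    class Thing : public Napi::ObjectWrap<Thing> {
     public:
      static Napi::Object Init(Napi::Env env, Napi::Object exports) {
        Napi::Function func = DefineClass(env, "Thing", {
            InstanceMethod("name", &Thing::Name),
        });
        exports.Set("Thing", func);
        return exports;
      }

      explicit Thing(const Napi::CallbackInfo& info)
          : Napi::ObjectWrap<Thing>(info) {}

     private:
      Napi::Value Name(const Napi::CallbackInfo& info) {
        return Napi::String::New(info.Env(), "thing");
      }
    };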
-NAN_METHOD(KafkaConsumer::NodePosition) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodePosition(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  KafkaConsumer* consumer =
+    Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

-  if (info.Length() < 1 || !info[0]->IsArray()) {
+  if (info.Length() < 1 || !info[0].IsArray()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify an array of topic partitions");
+    Napi::Error::New(env, "Need to specify an array of topic partitions").ThrowAsJavaScriptException();
+    return env.Null();
   }

   std::vector<RdKafka::TopicPartition *> toppars =
-    Conversion::TopicPartition::FromV8Array(info[0].As<v8::Array>());
+    Conversion::TopicPartition::FromV8Array(info[0].As<Napi::Array>());

   Baton b = consumer->Position(toppars);

   if (b.err() != RdKafka::ErrorCode::ERR_NO_ERROR) {
     // Let the JS library throw if we need to so the error can be more rich
     int error_code = static_cast<int>(b.err());
-    return info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+    return Napi::Number::New(env, error_code);
   }

-  info.GetReturnValue().Set(
-    Conversion::TopicPartition::ToV8Array(toppars));
-
-  // Delete the underlying topic partitions
-  RdKafka::TopicPartition::destroy(toppars);
+  Napi::Array result = Conversion::TopicPartition::ToV8Array(toppars);
+  // Delete the underlying topic partitions before returning
+  RdKafka::TopicPartition::destroy(toppars);
+  return result;
 }

-NAN_METHOD(KafkaConsumer::NodeAssignments) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeAssignments(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  KafkaConsumer* consumer =
+    Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

@@ -750,50 +759,52 @@ NAN_METHOD(KafkaConsumer::NodeAssignments) {
   if (b.err() != RdKafka::ERR_NO_ERROR) {
     // Let the JS library throw if we need to so the error can be more rich
     int error_code = static_cast<int>(b.err());
-    return info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+    return Napi::Number::New(env, error_code);
   }

-  info.GetReturnValue().Set(
-    Conversion::TopicPartition::ToV8Array(consumer->m_partitions));
+  return Conversion::TopicPartition::ToV8Array(consumer->m_partitions);
 }

-NAN_METHOD(KafkaConsumer::NodeAssignmentLost) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeAssignmentLost(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  KafkaConsumer* consumer =
+    Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

   Baton b = consumer->AssignmentLost();

   bool lost = b.data<bool>();
-  info.GetReturnValue().Set(Nan::New<v8::Boolean>(lost));
+  return Napi::Boolean::New(env, lost);
 }

-NAN_METHOD(KafkaConsumer::NodeRebalanceProtocol) {
-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+Napi::Value KafkaConsumer::NodeRebalanceProtocol(const Napi::CallbackInfo& info) {
+  KafkaConsumer* consumer =
+    Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());
   std::string protocol = consumer->RebalanceProtocol();
-  info.GetReturnValue().Set(Nan::New<v8::String>(protocol).ToLocalChecked());
+  return Napi::String::New(info.Env(), protocol);
 }

-NAN_METHOD(KafkaConsumer::NodeAssign) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeAssign(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 1 || !info[0]->IsArray()) {
+  if (info.Length() < 1 || !info[0].IsArray()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify an array of partitions");
+    Napi::Error::New(env, "Need to specify an array of partitions").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  v8::Local<v8::Array> partitions = info[0].As<v8::Array>();
+  Napi::Array partitions = info[0].As<Napi::Array>();
   std::vector<RdKafka::TopicPartition*> topic_partitions;

-  for (unsigned int i = 0; i < partitions->Length(); ++i) {
-    v8::Local<v8::Value> partition_obj_value;
-    if (!(
-      Nan::Get(partitions, i).ToLocal(&partition_obj_value) &&
-      partition_obj_value->IsObject())) {
-      Nan::ThrowError("Must pass topic-partition objects");
+  for (unsigned int i = 0; i < partitions.Length(); ++i) {
+    Napi::Value partition_obj_value = partitions.Get(i);
+    if (!(
partition_obj_value.IsObject())) { + Napi::Error::New(env, "Must pass topic-partition objects").ThrowAsJavaScriptException(); + } - v8::Local partition_obj = partition_obj_value.As(); + Napi::Object partition_obj = partition_obj_value.As(); // Got the object int64_t partition = GetParameter(partition_obj, "partition", -1); @@ -826,52 +837,56 @@ NAN_METHOD(KafkaConsumer::NodeAssign) { Baton b = consumer->Assign(topic_partitions); if (b.err() != RdKafka::ERR_NO_ERROR) { - Nan::ThrowError(RdKafka::err2str(b.err()).c_str()); + Napi::Error::New(env, RdKafka::err2str(b.err()).c_str()).ThrowAsJavaScriptException(); + } - info.GetReturnValue().Set(Nan::True()); + return env.True(); } -NAN_METHOD(KafkaConsumer::NodeUnassign) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeUnassign(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); if (!consumer->IsClosing() && !consumer->IsConnected()) { - Nan::ThrowError("KafkaConsumer is disconnected"); - return; + Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException(); + return env.Null(); } Baton b = consumer->Unassign(); if (b.err() != RdKafka::ERR_NO_ERROR) { - Nan::ThrowError(RdKafka::err2str(b.err()).c_str()); + Napi::Error::New(env, RdKafka::err2str(b.err()).c_str()).ThrowAsJavaScriptException(); + } - info.GetReturnValue().Set(Nan::True()); + return env.True(); } -NAN_METHOD(KafkaConsumer::NodeIncrementalAssign) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeIncrementalAssign(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsArray()) { + if (info.Length() < 1 || !info[0].IsArray()) { // Just throw an exception - return Nan::ThrowError("Need to specify an array of partitions"); + Napi::Error::New(env, "Need to specify an array of partitions").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local partitions = info[0].As(); + Napi::Array partitions = info[0].As(); std::vector topic_partitions; for (unsigned int i = 0; i < partitions->Length(); ++i) { - v8::Local partition_obj_value; + Napi::Value partition_obj_value; if (!( - Nan::Get(partitions, i).ToLocal(&partition_obj_value) && - partition_obj_value->IsObject())) { - Nan::ThrowError("Must pass topic-partition objects"); + (partitions).Get(i).ToLocal(&partition_obj_value) && + partition_obj_value.IsObject())) { + Napi::Error::New(env, "Must pass topic-partition objects").ThrowAsJavaScriptException(); + } - v8::Local partition_obj = partition_obj_value.As(); + Napi::Object partition_obj = partition_obj_value.As(); // Got the object int64_t partition = GetParameter(partition_obj, "partition", -1); @@ -904,33 +919,36 @@ NAN_METHOD(KafkaConsumer::NodeIncrementalAssign) { Baton b = consumer->IncrementalAssign(topic_partitions); if (b.err() != RdKafka::ERR_NO_ERROR) { - v8::Local errorObject = b.ToObject(); - Nan::ThrowError(errorObject); + Napi::Value errorObject = b.ToObject(); + Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); + } - info.GetReturnValue().Set(Nan::True()); + return env.True(); } -NAN_METHOD(KafkaConsumer::NodeIncrementalUnassign) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeIncrementalUnassign(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); - if (info.Length() < 1 || !info[0]->IsArray()) { + if (info.Length() < 1 || !info[0].IsArray()) { // Just throw an exception - return Nan::ThrowError("Need to specify an array of partitions"); + Napi::Error::New(env, 
"Need to specify an array of partitions").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local partitions = info[0].As(); + Napi::Array partitions = info[0].As(); std::vector topic_partitions; for (unsigned int i = 0; i < partitions->Length(); ++i) { - v8::Local partition_obj_value; + Napi::Value partition_obj_value; if (!( - Nan::Get(partitions, i).ToLocal(&partition_obj_value) && - partition_obj_value->IsObject())) { - Nan::ThrowError("Must pass topic-partition objects"); + (partitions).Get(i).ToLocal(&partition_obj_value) && + partition_obj_value.IsObject())) { + Napi::Error::New(env, "Must pass topic-partition objects").ThrowAsJavaScriptException(); + } - v8::Local partition_obj = partition_obj_value.As(); + Napi::Object partition_obj = partition_obj_value.As(); // Got the object int64_t partition = GetParameter(partition_obj, "partition", -1); @@ -963,53 +981,54 @@ NAN_METHOD(KafkaConsumer::NodeIncrementalUnassign) { Baton b = consumer->IncrementalUnassign(topic_partitions); if (b.err() != RdKafka::ERR_NO_ERROR) { - v8::Local errorObject = b.ToObject(); - Nan::ThrowError(errorObject); + Napi::Value errorObject = b.ToObject(); + Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); + } - info.GetReturnValue().Set(Nan::True()); + return env.True(); } -NAN_METHOD(KafkaConsumer::NodeUnsubscribe) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeUnsubscribe(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); Baton b = consumer->Unsubscribe(); - info.GetReturnValue().Set(Nan::New(static_cast(b.err()))); + return Napi::Number::New(env, static_cast(b.err())); } -NAN_METHOD(KafkaConsumer::NodeCommit) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeCommit(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); int error_code; KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); if (!consumer->IsConnected()) { - Nan::ThrowError("KafkaConsumer is disconnected"); - return; + Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException(); + return env.Null(); } - if (info[0]->IsNull() || info[0]->IsUndefined()) { + if (info[0].IsNull() || info[0].IsUndefined()) { Baton b = consumer->Commit(); error_code = static_cast(b.err()); - } else if (info[0]->IsArray()) { + } else if (info[0].IsArray()) { std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + Conversion::TopicPartition::FromV8Array(info[0].As()); Baton b = consumer->Commit(toppars); error_code = static_cast(b.err()); RdKafka::TopicPartition::destroy(toppars); - } else if (info[0]->IsObject()) { + } else if (info[0].IsObject()) { RdKafka::TopicPartition * toppar = - Conversion::TopicPartition::FromV8Object(info[0].As()); + Conversion::TopicPartition::FromV8Object(info[0].As()); if (toppar == NULL) { - Nan::ThrowError("Invalid topic partition provided"); - return; + Napi::Error::New(env, "Invalid topic partition provided").ThrowAsJavaScriptException(); + return env.Null(); } Baton b = consumer->Commit(toppar); @@ -1017,42 +1036,42 @@ NAN_METHOD(KafkaConsumer::NodeCommit) { delete toppar; } else { - Nan::ThrowError("First parameter must be an object or an array"); - return; + Napi::Error::New(env, "First parameter must be an object or an array").ThrowAsJavaScriptException(); + return env.Null(); } - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodeCommitSync) { - Nan::HandleScope scope; 
+Napi::Value KafkaConsumer::NodeCommitSync(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);
   int error_code;

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  KafkaConsumer* consumer =
+    Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

   if (!consumer->IsConnected()) {
-    Nan::ThrowError("KafkaConsumer is disconnected");
-    return;
+    Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  if (info[0]->IsNull() || info[0]->IsUndefined()) {
+  if (info[0].IsNull() || info[0].IsUndefined()) {
     Baton b = consumer->CommitSync();
     error_code = static_cast<int>(b.err());
-  } else if (info[0]->IsArray()) {
+  } else if (info[0].IsArray()) {
     std::vector<RdKafka::TopicPartition*> toppars =
-      Conversion::TopicPartition::FromV8Array(info[0].As<v8::Array>());
+      Conversion::TopicPartition::FromV8Array(info[0].As<Napi::Array>());

     Baton b = consumer->CommitSync(toppars);
     error_code = static_cast<int>(b.err());

     RdKafka::TopicPartition::destroy(toppars);
-  } else if (info[0]->IsObject()) {
+  } else if (info[0].IsObject()) {
     RdKafka::TopicPartition * toppar =
-      Conversion::TopicPartition::FromV8Object(info[0].As<v8::Object>());
+      Conversion::TopicPartition::FromV8Object(info[0].As<Napi::Object>());

     if (toppar == NULL) {
-      Nan::ThrowError("Invalid topic partition provided");
-      return;
+      Napi::Error::New(env, "Invalid topic partition provided").ThrowAsJavaScriptException();
+      return env.Null();
     }

     Baton b = consumer->CommitSync(toppar);
@@ -1060,101 +1079,106 @@ NAN_METHOD(KafkaConsumer::NodeCommitSync) {

     delete toppar;
   } else {
-    Nan::ThrowError("First parameter must be an object or an array");
-    return;
+    Napi::Error::New(env, "First parameter must be an object or an array").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+  return Napi::Number::New(env, error_code);
 }

-NAN_METHOD(KafkaConsumer::NodeCommitCb) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeCommitCb(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);
   int error_code;
   std::optional<std::vector<RdKafka::TopicPartition*>> toppars = std::nullopt;
-  Nan::Callback *callback;
+  Napi::FunctionReference *callback;

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  KafkaConsumer* consumer =
+    Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

   if (!consumer->IsConnected()) {
-    Nan::ThrowError("KafkaConsumer is disconnected");
-    return;
+    Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException();
+    return env.Null();
   }

   if (info.Length() != 2) {
-    Nan::ThrowError("Two arguments are required");
-    return;
+    Napi::Error::New(env, "Two arguments are required").ThrowAsJavaScriptException();
+    return env.Null();
   }

   if (!(
-      (info[0]->IsArray() || info[0]->IsNull()) &&
-      info[1]->IsFunction())) {
-    Nan::ThrowError(
-      "First argument should be an array or null and second one a callback");
-    return;
+      (info[0].IsArray() || info[0].IsNull()) &&
+      info[1].IsFunction())) {
+    Napi::Error::New(env,
+      "First argument should be an array or null and second one a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  if (info[0]->IsArray()) {
+  if (info[0].IsArray()) {
     toppars =
-      Conversion::TopicPartition::FromV8Array(info[0].As<v8::Array>());
+      Conversion::TopicPartition::FromV8Array(info[0].As<Napi::Array>());
   }

-  callback = new Nan::Callback(info[1].As<v8::Function>());
+  callback = new Napi::FunctionReference(
+    Napi::Persistent(info[1].As<Napi::Function>()));

-  Nan::AsyncQueueWorker(
-    new Workers::KafkaConsumerCommitCb(callback, consumer,
-      toppars));
+  (new Workers::KafkaConsumerCommitCb(callback, consumer,
+    toppars))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(KafkaConsumer::NodeSubscribe) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeSubscribe(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 1 || !info[0]->IsArray()) {
+  if (info.Length() < 1 || !info[0].IsArray()) {
     // Just throw an exception
-    return
Nan::ThrowError("First parameter must be an array"); + Napi::Error::New(env, "First parameter must be an array").ThrowAsJavaScriptException(); + return env.Null(); } KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - v8::Local topicsArray = info[0].As(); + Napi::Array topicsArray = info[0].As(); std::vector topics = Conversion::Util::ToStringVector(topicsArray); Baton b = consumer->Subscribe(topics); int error_code = static_cast(b.err()); - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodeSeek) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeSeek(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, // and callback), we can't call this thing if (info.Length() < 3) { - return Nan::ThrowError("Must provide a topic partition, timeout, and callback"); // NOLINT + Napi::Error::New(env, "Must provide a topic partition, timeout, and callback").ThrowAsJavaScriptException(); + return env.Null(); // NOLINT } - if (!info[0]->IsObject()) { - return Nan::ThrowError("Topic partition must be an object"); + if (!info[0].IsObject()) { + Napi::Error::New(env, "Topic partition must be an object").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[1]->IsNumber() && !info[1]->IsNull()) { - return Nan::ThrowError("Timeout must be a number."); + if (!info[1].IsNumber() && !info[1].IsNull()) { + Napi::Error::New(env, "Timeout must be a number.").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[2]->IsFunction()) { - return Nan::ThrowError("Callback must be a function"); + if (!info[2].IsFunction()) { + Napi::Error::New(env, "Callback must be a function").ThrowAsJavaScriptException(); + return env.Null(); } int timeout_ms; - Nan::Maybe maybeTimeout = - Nan::To(info[1].As()); + Napi::Maybe maybeTimeout = + info[1].As(.As().Uint32Value()); if (maybeTimeout.IsNothing()) { timeout_ms = 1000; } else { - timeout_ms = static_cast(maybeTimeout.FromJust()); + timeout_ms = static_cast(maybeTimeout); // Do not allow timeouts of less than 10. Providing 0 causes segfaults // because it makes it asynchronous. 
if (timeout_ms < 10) { @@ -1165,63 +1189,66 @@ NAN_METHOD(KafkaConsumer::NodeSeek) { KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); const RdKafka::TopicPartition * toppar = - Conversion::TopicPartition::FromV8Object(info[0].As()); + Conversion::TopicPartition::FromV8Object(info[0].As()); if (!toppar) { - return Nan::ThrowError("Invalid topic partition provided"); + Napi::Error::New(env, "Invalid topic partition provided").ThrowAsJavaScriptException(); + return env.Null(); } - Nan::Callback *callback = new Nan::Callback(info[2].As()); - Nan::AsyncQueueWorker( + Napi::FunctionReference *callback = new Napi::FunctionReference(info[2].As()); + Napi::AsyncQueueWorker( new Workers::KafkaConsumerSeek(callback, consumer, toppar, timeout_ms)); - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(KafkaConsumer::NodeOffsetsStore) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeOffsetsStore(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, // and callback), we can't call this thing if (info.Length() < 1) { - return Nan::ThrowError("Must provide a list of topic partitions"); + Napi::Error::New(env, "Must provide a list of topic partitions").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Topic partition must be an array of objects"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Topic partition must be an array of objects").ThrowAsJavaScriptException(); + return env.Null(); } KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + Conversion::TopicPartition::FromV8Array(info[0].As()); Baton b = consumer->OffsetsStore(toppars); RdKafka::TopicPartition::destroy(toppars); int error_code = static_cast(b.err()); - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodeOffsetsStoreSingle) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeOffsetsStoreSingle(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, partition, // offset, and leader epoch), we can't call this. 
if (info.Length() < 4) { - return Nan::ThrowError( + return Napi::ThrowError( "Must provide topic, partition, offset and leaderEpoch"); } KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); // Get string pointer for the topic name - Nan::Utf8String topicUTF8(Nan::To(info[0]).ToLocalChecked()); + std::string topicUTF8 = info[0].As(.To()); const std::string& topic_name(*topicUTF8); - int64_t partition = Nan::To(info[1]).FromJust(); - int64_t offset = Nan::To(info[2]).FromJust(); - int64_t leader_epoch = Nan::To(info[3]).FromJust(); + int64_t partition = info[1].As().Int64Value(); + int64_t offset = info[2].As().Int64Value(); + int64_t leader_epoch = info[3].As().Int64Value(); RdKafka::TopicPartition* toppar = RdKafka::TopicPartition::create(topic_name, partition, offset); @@ -1233,26 +1260,28 @@ NAN_METHOD(KafkaConsumer::NodeOffsetsStoreSingle) { delete toppar; int error_code = static_cast(b.err()); - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodePause) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodePause(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, // and callback), we can't call this thing if (info.Length() < 1) { - return Nan::ThrowError("Must provide a list of topic partitions"); + Napi::Error::New(env, "Must provide a list of topic partitions").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Topic partition must be an array of objects"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Topic partition must be an array of objects").ThrowAsJavaScriptException(); + return env.Null(); } KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + Conversion::TopicPartition::FromV8Array(info[0].As()); Baton b = consumer->Pause(toppars); RdKafka::TopicPartition::destroy(toppars); @@ -1271,26 +1300,28 @@ NAN_METHOD(KafkaConsumer::NodePause) { #endif int error_code = static_cast(b.err()); - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodeResume) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeResume(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, // and callback), we can't call this thing if (info.Length() < 1) { - return Nan::ThrowError("Must provide a list of topic partitions"); // NOLINT + Napi::Error::New(env, "Must provide a list of topic partitions").ThrowAsJavaScriptException(); + return env.Null(); // NOLINT } - if (!info[0]->IsArray()) { - return Nan::ThrowError("Topic partition must be an array of objects"); + if (!info[0].IsArray()) { + Napi::Error::New(env, "Topic partition must be an array of objects").ThrowAsJavaScriptException(); + return env.Null(); } KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); + Conversion::TopicPartition::FromV8Array(info[0].As()); Baton b = consumer->Resume(toppars); @@ -1306,146 +1337,159 @@ NAN_METHOD(KafkaConsumer::NodeResume) { } int error_code = static_cast(b.err()); - info.GetReturnValue().Set(Nan::New(error_code)); + return Napi::Number::New(env, error_code); } -NAN_METHOD(KafkaConsumer::NodeConsumeLoop) { - Nan::HandleScope scope; +Napi::Value 
KafkaConsumer::NodeConsumeLoop(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); if (info.Length() < 3) { // Just throw an exception - return Nan::ThrowError("Invalid number of parameters"); + Napi::Error::New(env, "Invalid number of parameters").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsNumber()) { - return Nan::ThrowError("Need to specify a timeout"); + if (!info[0].IsNumber()) { + Napi::Error::New(env, "Need to specify a timeout").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[1]->IsNumber()) { - return Nan::ThrowError("Need to specify a sleep delay"); + if (!info[1].IsNumber()) { + Napi::Error::New(env, "Need to specify a sleep delay").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[2]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (!info[2].IsFunction()) { + Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); + return env.Null(); } int timeout_ms; - Nan::Maybe maybeTimeout = - Nan::To(info[0].As()); + Napi::Maybe maybeTimeout = + info[0].As(.As().Uint32Value()); if (maybeTimeout.IsNothing()) { timeout_ms = 1000; } else { - timeout_ms = static_cast(maybeTimeout.FromJust()); + timeout_ms = static_cast(maybeTimeout); } int timeout_sleep_delay_ms; - Nan::Maybe maybeSleep = - Nan::To(info[1].As()); + Napi::Maybe maybeSleep = + info[1].As(.As().Uint32Value()); if (maybeSleep.IsNothing()) { timeout_sleep_delay_ms = 500; } else { - timeout_sleep_delay_ms = static_cast(maybeSleep.FromJust()); + timeout_sleep_delay_ms = static_cast(maybeSleep); } KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); if (consumer->m_consume_loop != nullptr) { - return Nan::ThrowError("Consume was already called"); + Napi::Error::New(env, "Consume was already called").ThrowAsJavaScriptException(); + return env.Null(); } if (!consumer->IsConnected()) { - return Nan::ThrowError("Connect must be called before consume"); + Napi::Error::New(env, "Connect must be called before consume").ThrowAsJavaScriptException(); + return env.Null(); } - v8::Local cb = info[2].As(); + Napi::Function cb = info[2].As(); - Nan::Callback *callback = new Nan::Callback(cb); + Napi::FunctionReference *callback = new Napi::FunctionReference(cb); consumer->m_consume_loop = new Workers::KafkaConsumerConsumeLoop(callback, consumer, timeout_ms, timeout_sleep_delay_ms); // NOLINT - info.GetReturnValue().Set(Nan::Null()); + return env.Null(); } -NAN_METHOD(KafkaConsumer::NodeConsume) { - Nan::HandleScope scope; +Napi::Value KafkaConsumer::NodeConsume(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); if (info.Length() < 2) { // Just throw an exception - return Nan::ThrowError("Invalid number of parameters"); + Napi::Error::New(env, "Invalid number of parameters").ThrowAsJavaScriptException(); + return env.Null(); } int timeout_ms; - Nan::Maybe maybeTimeout = - Nan::To(info[0].As()); + Napi::Maybe maybeTimeout = + info[0].As(.As().Uint32Value()); if (maybeTimeout.IsNothing()) { timeout_ms = 1000; } else { - timeout_ms = static_cast(maybeTimeout.FromJust()); + timeout_ms = static_cast(maybeTimeout); } - if (info[1]->IsNumber()) { - if (!info[2]->IsBoolean()) { - return Nan::ThrowError("Need to specify a boolean"); + if (info[1].IsNumber()) { + if (!info[2].IsBoolean()) { + Napi::Error::New(env, "Need to specify a boolean").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[3]->IsFunction()) { - return Nan::ThrowError("Need to specify a callback"); + if (!info[3].IsFunction()) 
-NAN_METHOD(KafkaConsumer::NodeConsume) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeConsume(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

   if (info.Length() < 2) {
     // Just throw an exception
-    return Nan::ThrowError("Invalid number of parameters");
+    Napi::Error::New(env, "Invalid number of parameters").ThrowAsJavaScriptException();
+    return env.Null();
   }

   int timeout_ms;
-  Nan::Maybe<uint32_t> maybeTimeout =
-    Nan::To<uint32_t>(info[0].As<v8::Number>());
-
-  if (maybeTimeout.IsNothing()) {
+  if (!info[0].IsNumber()) {
     timeout_ms = 1000;
   } else {
-    timeout_ms = static_cast<int>(maybeTimeout.FromJust());
+    timeout_ms = static_cast<int>(info[0].As<Napi::Number>().Uint32Value());
   }

-  if (info[1]->IsNumber()) {
-    if (!info[2]->IsBoolean()) {
-      return Nan::ThrowError("Need to specify a boolean");
+  if (info[1].IsNumber()) {
+    if (!info[2].IsBoolean()) {
+      Napi::Error::New(env, "Need to specify a boolean").ThrowAsJavaScriptException();
+      return env.Null();
     }
-    if (!info[3]->IsFunction()) {
-      return Nan::ThrowError("Need to specify a callback");
+    if (!info[3].IsFunction()) {
+      Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+      return env.Null();
     }

-    v8::Local<v8::Number> numMessagesNumber = info[1].As<v8::Number>();
-    Nan::Maybe<uint32_t> numMessagesMaybe = Nan::To<uint32_t>(numMessagesNumber);  // NOLINT
     uint32_t numMessages;
-    if (numMessagesMaybe.IsNothing()) {
-      return Nan::ThrowError("Parameter must be a number over 0");
-    } else {
-      numMessages = numMessagesMaybe.FromJust();
-    }
+    // info[1] was checked to be a number above, so the conversion cannot fail.
+    numMessages = info[1].As<Napi::Number>().Uint32Value();

-    v8::Local<v8::Boolean> isTimeoutOnlyForFirstMessageBoolean = info[2].As<v8::Boolean>();  // NOLINT
-    Nan::Maybe<bool> isTimeoutOnlyForFirstMessageMaybe =
-      Nan::To<bool>(isTimeoutOnlyForFirstMessageBoolean);
     bool isTimeoutOnlyForFirstMessage;
-    if (isTimeoutOnlyForFirstMessageMaybe.IsNothing()) {
-      return Nan::ThrowError("Parameter must be a boolean");
-    } else {
-      isTimeoutOnlyForFirstMessage = isTimeoutOnlyForFirstMessageMaybe.FromJust();  // NOLINT
-    }
+    isTimeoutOnlyForFirstMessage = info[2].As<Napi::Boolean>().Value();

-    KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+    KafkaConsumer* consumer = Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

-    v8::Local<v8::Function> cb = info[3].As<v8::Function>();
-    Nan::Callback *callback = new Nan::Callback(cb);
-    Nan::AsyncQueueWorker(
-      new Workers::KafkaConsumerConsumeNum(callback, consumer, numMessages, timeout_ms, isTimeoutOnlyForFirstMessage));  // NOLINT
+    Napi::Function cb = info[3].As<Napi::Function>();
+    Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));
+    (new Workers::KafkaConsumerConsumeNum(callback, consumer, numMessages, timeout_ms, isTimeoutOnlyForFirstMessage))->Queue();  // NOLINT

   } else {
-    if (!info[1]->IsFunction()) {
-      return Nan::ThrowError("Need to specify a callback");
+    if (!info[1].IsFunction()) {
+      Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+      return env.Null();
     }

-    KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+    KafkaConsumer* consumer = Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

-    v8::Local<v8::Function> cb = info[1].As<v8::Function>();
-    Nan::Callback *callback = new Nan::Callback(cb);
-    Nan::AsyncQueueWorker(
-      new Workers::KafkaConsumerConsume(callback, consumer, timeout_ms));
+    Napi::Function cb = info[1].As<Napi::Function>();
+    Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));
+    (new Workers::KafkaConsumerConsume(callback, consumer, timeout_ms))->Queue();
   }

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }
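Nan::AsyncQueueWorker has no free-function equivalent in node-addon-api; a Napi::AsyncWorker queues itself and is deleted by the runtime after its completion callback has run. A sketch of the shape the Workers:: classes are being moved toward (class and member names here are illustrative, assuming the base class owns the persistent callback reference):

#include <napi.h>

class ExampleWorker : public Napi::AsyncWorker {
 public:
  explicit ExampleWorker(Napi::Function cb) : Napi::AsyncWorker(cb) {}

  void Execute() override {
    // Runs on a worker thread; no Napi::* calls are legal here.
  }

  void OnOK() override {
    Napi::HandleScope scope(Env());
    // The base-class Callback() holds the persistent reference for us.
    Callback().Call({Env().Null(), Napi::String::New(Env(), "done")});
  }
};

// Usage: the worker frees itself after OnOK()/OnError().
//   (new ExampleWorker(cb))->Queue();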
-NAN_METHOD(KafkaConsumer::NodeConnect) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeConnect(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 1 || !info[0]->IsFunction()) {
+  if (info.Length() < 1 || !info[0].IsFunction()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify a callback");
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  KafkaConsumer* consumer = Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

@@ -1455,22 +1499,23 @@ NAN_METHOD(KafkaConsumer::NodeConnect) {
   // We will deactivate them if the connection fails.
   consumer->ActivateDispatchers();

-  Nan::Callback *callback = new Nan::Callback(info[0].As<v8::Function>());
-  Nan::AsyncQueueWorker(new Workers::KafkaConsumerConnect(callback, consumer));
+  Napi::FunctionReference *callback =
+    new Napi::FunctionReference(Napi::Persistent(info[0].As<Napi::Function>()));
+  (new Workers::KafkaConsumerConnect(callback, consumer))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(KafkaConsumer::NodeDisconnect) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeDisconnect(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 1 || !info[0]->IsFunction()) {
+  if (info.Length() < 1 || !info[0].IsFunction()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify a callback");
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  v8::Local<v8::Function> cb = info[0].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[0].As<Napi::Function>();
+  Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));

-  KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  KafkaConsumer* consumer = Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

   Workers::KafkaConsumerConsumeLoop* consumeLoop =
@@ -1486,33 +1531,34 @@ NAN_METHOD(KafkaConsumer::NodeDisconnect) {
     consumer->m_consume_loop = nullptr;
   }

-  Nan::AsyncQueueWorker(
-    new Workers::KafkaConsumerDisconnect(callback, consumer));
+  (new Workers::KafkaConsumerDisconnect(callback, consumer))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(KafkaConsumer::NodeGetWatermarkOffsets) {
-  Nan::HandleScope scope;
+Napi::Value KafkaConsumer::NodeGetWatermarkOffsets(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  KafkaConsumer* obj = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
+  KafkaConsumer* obj = Napi::ObjectWrap<KafkaConsumer>::Unwrap(info.This().As<Napi::Object>());

-  if (!info[0]->IsString()) {
-    Nan::ThrowError("1st parameter must be a topic string");
-    return;
+  if (!info[0].IsString()) {
+    Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  if (!info[1]->IsNumber()) {
-    Nan::ThrowError("2nd parameter must be a partition number");
-    return;
+  if (!info[1].IsNumber()) {
+    Napi::Error::New(env, "2nd parameter must be a partition number").ThrowAsJavaScriptException();
+    return env.Null();
   }

   // The first parameter is the topic
-  Nan::Utf8String topicUTF8(Nan::To<v8::String>(info[0]).ToLocalChecked());
-  std::string topic_name(*topicUTF8);
+  std::string topic_name = info[0].As<Napi::String>().Utf8Value();

   // Second parameter is the partition
-  int32_t partition = Nan::To<int32_t>(info[1]).FromJust();
+  int32_t partition = info[1].As<Napi::Number>().Int32Value();

   // Set these ints which will store the return data
   int64_t low_offset;
@@ -1524,15 +1570,15 @@ NAN_METHOD(KafkaConsumer::NodeGetWatermarkOffsets) {
   if (b.err() != RdKafka::ERR_NO_ERROR) {
     // Let the JS library throw if we need to so the error can be more rich
     int error_code = static_cast<int>(b.err());
-    return info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+    return Napi::Number::New(env, error_code);
   } else {
-    v8::Local<v8::Object> offsetsObj = Nan::New<v8::Object>();
-    Nan::Set(offsetsObj, Nan::New("lowOffset").ToLocalChecked(),
-             Nan::New<v8::Number>(low_offset));
-    Nan::Set(offsetsObj, Nan::New("highOffset").ToLocalChecked(),
-             Nan::New<v8::Number>(high_offset));
+    Napi::Object offsetsObj = Napi::Object::New(env);
+    offsetsObj.Set(Napi::String::New(env, "lowOffset"),
+                   Napi::Number::New(env, low_offset));
+    offsetsObj.Set(Napi::String::New(env, "highOffset"),
+                   Napi::Number::New(env, high_offset));

-    return info.GetReturnValue().Set(offsetsObj);
+    return offsetsObj;
   }
 }

diff --git a/src/kafka-consumer.h b/src/kafka-consumer.h
index
e0d93562..774dd811 100644 --- a/src/kafka-consumer.h +++ b/src/kafka-consumer.h @@ -11,7 +11,8 @@ #ifndef SRC_KAFKA_CONSUMER_H_ #define SRC_KAFKA_CONSUMER_H_ -#include +#include +#include #include #include #include @@ -38,8 +39,8 @@ namespace NodeKafka { class KafkaConsumer : public Connection { friend class Producer; public: - static void Init(v8::Local); - static v8::Local NewInstance(v8::Local); + static void Init(Napi::Object); + static Napi::Object NewInstance(Napi::Value); Baton Connect(); Baton Disconnect(); @@ -90,11 +91,11 @@ class KafkaConsumer : public Connection { void DeactivateDispatchers(); void ConfigureCallback(const std::string& string_key, - const v8::Local& cb, bool add) override; + const Napi::Function& cb, bool add) override; protected: - static Nan::Persistent constructor; - static void New(const Nan::FunctionCallbackInfo& info); + static Napi::FunctionReference constructor; + static void New(const Napi::CallbackInfo& info); KafkaConsumer(Conf *, Conf *); ~KafkaConsumer(); @@ -114,32 +115,32 @@ class KafkaConsumer : public Connection { RdKafka::KafkaConsumer *m_consumer = nullptr; // Node methods - static NAN_METHOD(NodeConnect); - static NAN_METHOD(NodeSubscribe); - static NAN_METHOD(NodeDisconnect); - static NAN_METHOD(NodeAssign); - static NAN_METHOD(NodeUnassign); - static NAN_METHOD(NodeIncrementalAssign); - static NAN_METHOD(NodeIncrementalUnassign); - static NAN_METHOD(NodeAssignments); - static NAN_METHOD(NodeAssignmentLost); - static NAN_METHOD(NodeRebalanceProtocol); - static NAN_METHOD(NodeUnsubscribe); - static NAN_METHOD(NodeCommit); - static NAN_METHOD(NodeCommitSync); - static NAN_METHOD(NodeCommitCb); - static NAN_METHOD(NodeOffsetsStore); - static NAN_METHOD(NodeOffsetsStoreSingle); - static NAN_METHOD(NodeCommitted); - static NAN_METHOD(NodePosition); - static NAN_METHOD(NodeSubscription); - static NAN_METHOD(NodeSeek); - static NAN_METHOD(NodeGetWatermarkOffsets); - static NAN_METHOD(NodeConsumeLoop); - static NAN_METHOD(NodeConsume); - - static NAN_METHOD(NodePause); - static NAN_METHOD(NodeResume); + static Napi::Value NodeConnect(const Napi::CallbackInfo& info); + static Napi::Value NodeSubscribe(const Napi::CallbackInfo& info); + static Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); + static Napi::Value NodeAssign(const Napi::CallbackInfo& info); + static Napi::Value NodeUnassign(const Napi::CallbackInfo& info); + static Napi::Value NodeIncrementalAssign(const Napi::CallbackInfo& info); + static Napi::Value NodeIncrementalUnassign(const Napi::CallbackInfo& info); + static Napi::Value NodeAssignments(const Napi::CallbackInfo& info); + static Napi::Value NodeAssignmentLost(const Napi::CallbackInfo& info); + static Napi::Value NodeRebalanceProtocol(const Napi::CallbackInfo& info); + static Napi::Value NodeUnsubscribe(const Napi::CallbackInfo& info); + static Napi::Value NodeCommit(const Napi::CallbackInfo& info); + static Napi::Value NodeCommitSync(const Napi::CallbackInfo& info); + static Napi::Value NodeCommitCb(const Napi::CallbackInfo& info); + static Napi::Value NodeOffsetsStore(const Napi::CallbackInfo& info); + static Napi::Value NodeOffsetsStoreSingle(const Napi::CallbackInfo& info); + static Napi::Value NodeCommitted(const Napi::CallbackInfo& info); + static Napi::Value NodePosition(const Napi::CallbackInfo& info); + static Napi::Value NodeSubscription(const Napi::CallbackInfo& info); + static Napi::Value NodeSeek(const Napi::CallbackInfo& info); + static Napi::Value NodeGetWatermarkOffsets(const Napi::CallbackInfo& info); + 
static Napi::Value NodeConsumeLoop(const Napi::CallbackInfo& info); + static Napi::Value NodeConsume(const Napi::CallbackInfo& info); + + static Napi::Value NodePause(const Napi::CallbackInfo& info); + static Napi::Value NodeResume(const Napi::CallbackInfo& info); }; } // namespace NodeKafka diff --git a/src/producer.cc b/src/producer.cc index 68d8ad75..0e204628 100644 --- a/src/producer.cc +++ b/src/producer.cc @@ -47,14 +47,14 @@ Producer::~Producer() { Disconnect(); } -Nan::Persistent Producer::constructor; +Napi::FunctionReference Producer::constructor; -void Producer::Init(v8::Local exports) { - Nan::HandleScope scope; +void Producer::Init(Napi::Object exports) { + Napi::HandleScope scope(env); + + Napi::FunctionReference tpl = Napi::Function::New(env, New); + tpl->SetClassName(Napi::String::New(env, "Producer")); - v8::Local tpl = Nan::New(New); - tpl->SetClassName(Nan::New("Producer").ToLocalChecked()); - tpl->InstanceTemplate()->SetInternalFieldCount(1); /* * Lifecycle events inherited from NodeKafka::Connection @@ -62,84 +62,90 @@ void Producer::Init(v8::Local exports) { * @sa NodeKafka::Connection */ - Nan::SetPrototypeMethod(tpl, "configureCallbacks", NodeConfigureCallbacks); + InstanceMethod("configureCallbacks", &NodeConfigureCallbacks), /* * @brief Methods to do with establishing state */ - Nan::SetPrototypeMethod(tpl, "connect", NodeConnect); - Nan::SetPrototypeMethod(tpl, "disconnect", NodeDisconnect); - Nan::SetPrototypeMethod(tpl, "getMetadata", NodeGetMetadata); - Nan::SetPrototypeMethod(tpl, "queryWatermarkOffsets", NodeQueryWatermarkOffsets); // NOLINT - Nan::SetPrototypeMethod(tpl, "poll", NodePoll); - Nan::SetPrototypeMethod(tpl, "setPollInBackground", NodeSetPollInBackground); - Nan::SetPrototypeMethod(tpl, "setSaslCredentials", NodeSetSaslCredentials); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerToken", NodeSetOAuthBearerToken); - Nan::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", + InstanceMethod("connect", &NodeConnect), + InstanceMethod("disconnect", &NodeDisconnect), + InstanceMethod("getMetadata", &NodeGetMetadata), + InstanceMethod("queryWatermarkOffsets", &NodeQueryWatermarkOffsets), // NOLINT + InstanceMethod("poll", &NodePoll), + InstanceMethod("setPollInBackground", &NodeSetPollInBackground), + InstanceMethod("setSaslCredentials", &NodeSetSaslCredentials), + InstanceMethod("setOAuthBearerToken", &NodeSetOAuthBearerToken), + Napi::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", NodeSetOAuthBearerTokenFailure); /* * @brief Methods exposed to do with message production */ - Nan::SetPrototypeMethod(tpl, "setPartitioner", NodeSetPartitioner); - Nan::SetPrototypeMethod(tpl, "produce", NodeProduce); + InstanceMethod("setPartitioner", &NodeSetPartitioner), + InstanceMethod("produce", &NodeProduce), - Nan::SetPrototypeMethod(tpl, "flush", NodeFlush); + InstanceMethod("flush", &NodeFlush), /* * @brief Methods exposed to do with transactions */ - Nan::SetPrototypeMethod(tpl, "initTransactions", NodeInitTransactions); - Nan::SetPrototypeMethod(tpl, "beginTransaction", NodeBeginTransaction); - Nan::SetPrototypeMethod(tpl, "commitTransaction", NodeCommitTransaction); - Nan::SetPrototypeMethod(tpl, "abortTransaction", NodeAbortTransaction); - Nan::SetPrototypeMethod(tpl, "sendOffsetsToTransaction", NodeSendOffsetsToTransaction); // NOLINT + InstanceMethod("initTransactions", &NodeInitTransactions), + InstanceMethod("beginTransaction", &NodeBeginTransaction), + InstanceMethod("commitTransaction", &NodeCommitTransaction), + 
InstanceMethod("abortTransaction", &NodeAbortTransaction), + InstanceMethod("sendOffsetsToTransaction", &NodeSendOffsetsToTransaction), // NOLINT // connect. disconnect. resume. pause. get meta data - constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext())) - .ToLocalChecked()); + constructor.Reset((tpl->GetFunction(Napi::GetCurrentContext())) + ); - Nan::Set(exports, Nan::New("Producer").ToLocalChecked(), - tpl->GetFunction(Nan::GetCurrentContext()).ToLocalChecked()); + (exports).Set(Napi::String::New(env, "Producer"), + tpl->GetFunction(Napi::GetCurrentContext())); } -void Producer::New(const Nan::FunctionCallbackInfo& info) { +void Producer::New(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); if (!info.IsConstructCall()) { - return Nan::ThrowError("non-constructor invocation not supported"); + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + return env.Null(); } if (info.Length() < 2) { - return Nan::ThrowError("You must supply global and topic configuration"); + Napi::Error::New(env, "You must supply global and topic configuration").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsObject()) { - return Nan::ThrowError("Global configuration data must be specified"); + if (!info[0].IsObject()) { + Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException(); + return env.Null(); } std::string errstr; Conf* gconfig = Conf::create(RdKafka::Conf::CONF_GLOBAL, - (info[0]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); + (info[0].ToObject(Napi::GetCurrentContext())), errstr); if (!gconfig) { - return Nan::ThrowError(errstr.c_str()); + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return env.Null(); } // If tconfig isn't set, then just let us pick properties from gconf. 
Conf* tconfig = nullptr; - if (info[1]->IsObject()) { + if (info[1].IsObject()) { tconfig = Conf::create( RdKafka::Conf::CONF_TOPIC, - (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); + (info[1].ToObject(Napi::GetCurrentContext())), errstr); if (!tconfig) { // No longer need this since we aren't instantiating anything delete gconfig; - return Nan::ThrowError(errstr.c_str()); + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return env.Null(); } } @@ -152,18 +158,19 @@ void Producer::New(const Nan::FunctionCallbackInfo& info) { // basically it sets the configuration data // we don't need to do that because we lazy load it - info.GetReturnValue().Set(info.This()); + return info.This(); } -v8::Local Producer::NewInstance(v8::Local arg) { - Nan::EscapableHandleScope scope; +Napi::Object Producer::NewInstance(Napi::Value arg) { + Napi::Env env = arg.Env(); + Napi::EscapableHandleScope scope(env); const unsigned argc = 1; - v8::Local argv[argc] = { arg }; - v8::Local cons = Nan::New(constructor); - v8::Local instance = - Nan::NewInstance(cons, argc, argv).ToLocalChecked(); + Napi::Value argv[argc] = { arg }; + Napi::Function cons = Napi::Function::New(env, constructor); + Napi::Object instance = + Napi::NewInstance(cons, argc, argv); return scope.Escape(instance); } @@ -370,15 +377,15 @@ Baton Producer::SetPollInBackground(bool set) { } void Producer::ConfigureCallback(const std::string& string_key, - const v8::Local& cb, bool add) { + const Napi::Function& cb, bool add) { if (string_key.compare("delivery_cb") == 0) { if (add) { bool dr_msg_cb = false; - v8::Local dr_msg_cb_key = Nan::New("dr_msg_cb").ToLocalChecked(); // NOLINT - if (Nan::Has(cb, dr_msg_cb_key).FromMaybe(false)) { - v8::Local v = Nan::Get(cb, dr_msg_cb_key).ToLocalChecked(); + Napi::String dr_msg_cb_key = Napi::String::New(env, "dr_msg_cb"); // NOLINT + if ((cb).Has(dr_msg_cb_key).FromMaybe(false)) { + Napi::Value v = (cb).Get(dr_msg_cb_key); if (v->IsBoolean()) { - dr_msg_cb = Nan::To(v).ToChecked(); + dr_msg_cb = v.As().Value().ToChecked(); } } if (dr_msg_cb) { @@ -475,22 +482,23 @@ Baton Producer::SendOffsetsToTransaction( * * @sa RdKafka::Producer::produce */ -NAN_METHOD(Producer::NodeProduce) { - Nan::HandleScope scope; +Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); // Need to extract the message data here. 
if (info.Length() < 3) { // Just throw an exception - return Nan::ThrowError("Need to specify a topic, partition, and message"); + Napi::Error::New(env, "Need to specify a topic, partition, and message").ThrowAsJavaScriptException(); + return env.Null(); } // Second parameter is the partition int32_t partition; - if (info[1]->IsNull() || info[1]->IsUndefined()) { + if (info[1].IsNull() || info[1].IsUndefined()) { partition = RdKafka::Topic::PARTITION_UA; } else { - partition = Nan::To(info[1]).FromJust(); + partition = info[1].As().Int32Value(); } if (partition < 0) { @@ -500,15 +508,16 @@ NAN_METHOD(Producer::NodeProduce) { size_t message_buffer_length; void* message_buffer_data; - if (info[2]->IsNull()) { + if (info[2].IsNull()) { // This is okay for whatever reason message_buffer_length = 0; message_buffer_data = NULL; - } else if (!node::Buffer::HasInstance(info[2])) { - return Nan::ThrowError("Message must be a buffer or null"); + } else if (!info[2].IsBuffer()) { + Napi::Error::New(env, "Message must be a buffer or null").ThrowAsJavaScriptException(); + return env.Null(); } else { - v8::Local message_buffer_object = - (info[2]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(); + Napi::Object message_buffer_object = + (info[2].ToObject(Napi::GetCurrentContext())); // v8 handles the garbage collection here so we need to make a copy of // the buffer or assign the buffer to a persistent handle. @@ -518,15 +527,15 @@ NAN_METHOD(Producer::NodeProduce) { // which should be more memory-efficient and allow v8 to dispose of the // buffer sooner - message_buffer_length = node::Buffer::Length(message_buffer_object); - message_buffer_data = node::Buffer::Data(message_buffer_object); + message_buffer_length = message_buffer_object.As>().Length(); + message_buffer_data = message_buffer_object.As>().Data(); if (message_buffer_data == NULL) { // empty string message buffer should not end up as null message - v8::Local message_buffer_object_emptystring = - Nan::NewBuffer(new char[0], 0).ToLocalChecked(); + Napi::Object message_buffer_object_emptystring = + Napi::Buffer::New(env, new char[0], 0); message_buffer_length = - node::Buffer::Length(message_buffer_object_emptystring); - message_buffer_data = node::Buffer::Data(message_buffer_object_emptystring); // NOLINT + message_buffer_object_emptystring.As>().Length(); + message_buffer_data = message_buffer_object_emptystring.As>().Data(); // NOLINT } } @@ -534,13 +543,13 @@ NAN_METHOD(Producer::NodeProduce) { const void* key_buffer_data; std::string * key = NULL; - if (info[3]->IsNull() || info[3]->IsUndefined()) { + if (info[3].IsNull() || info[3].IsUndefined()) { // This is okay for whatever reason key_buffer_length = 0; key_buffer_data = NULL; - } else if (node::Buffer::HasInstance(info[3])) { - v8::Local key_buffer_object = - (info[3]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(); + } else if (info[3].IsBuffer()) { + Napi::Object key_buffer_object = + (info[3].ToObject(Napi::GetCurrentContext())); // v8 handles the garbage collection here so we need to make a copy of // the buffer or assign the buffer to a persistent handle. 
@@ -550,20 +559,20 @@ NAN_METHOD(Producer::NodeProduce) {
     // which should be more memory-efficient and allow v8 to dispose of the
     // buffer sooner

-    key_buffer_length = node::Buffer::Length(key_buffer_object);
-    key_buffer_data = node::Buffer::Data(key_buffer_object);
+    key_buffer_length = key_buffer_object.As<Napi::Buffer<char>>().Length();
+    key_buffer_data = key_buffer_object.As<Napi::Buffer<char>>().Data();

     if (key_buffer_data == NULL) {
       // empty string key buffer should not end up as null key
-      v8::Local<v8::Object> key_buffer_object_emptystring =
-        Nan::NewBuffer(new char[0], 0).ToLocalChecked();
-      key_buffer_length = node::Buffer::Length(key_buffer_object_emptystring);
-      key_buffer_data = node::Buffer::Data(key_buffer_object_emptystring);
+      Napi::Buffer<char> key_buffer_emptystring =
+        Napi::Buffer<char>::New(env, new char[0], 0);
+      key_buffer_length = key_buffer_emptystring.Length();
+      key_buffer_data = key_buffer_emptystring.Data();
     }
   } else {
     // If it was a string just use the utf8 value.
-    v8::Local<v8::String> val = Nan::To<v8::String>(info[3]).ToLocalChecked();
-    // Get string pointer for this thing
-    Nan::Utf8String keyUTF8(val);
-    key = new std::string(*keyUTF8);
+    key = new std::string(info[3].As<Napi::String>().Utf8Value());

     key_buffer_data = key->data();
@@ -572,69 +581,72 @@ NAN_METHOD(Producer::NodeProduce) {

   int64_t timestamp;

-  if (info.Length() > 4 && !info[4]->IsUndefined() && !info[4]->IsNull()) {
-    if (!info[4]->IsNumber()) {
-      return Nan::ThrowError("Timestamp must be a number");
+  if (info.Length() > 4 && !info[4].IsUndefined() && !info[4].IsNull()) {
+    if (!info[4].IsNumber()) {
+      Napi::Error::New(env, "Timestamp must be a number").ThrowAsJavaScriptException();
+      return env.Null();
     }

-    timestamp = Nan::To<int64_t>(info[4]).FromJust();
+    timestamp = info[4].As<Napi::Number>().Int64Value();
   } else {
     timestamp = 0;
   }

   void* opaque = NULL;
   // Opaque handling
-  if (info.Length() > 5 && !info[5]->IsUndefined()) {
+  if (info.Length() > 5 && !info[5].IsUndefined()) {
     // We need to create a persistent handle
-    opaque = new Nan::Persistent<v8::Value>(info[5]);
+    opaque = new Napi::Reference<Napi::Value>(Napi::Persistent(info[5]));
     // To get the value from this later,
-    // v8::Local<v8::Object> object = Nan::New(persistent);
+    // Napi::Value object = persistent->Value();
   }
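// Note on the opaque handle above: librdkafka only hands this pointer back
// through the delivery report, so the Napi::Reference must stay alive until
// that report fires, and must be Reset() and deleted exactly once (either in
// the delivery callback, or further down when produce() fails synchronously).
// Illustrative sketch of the retrieval side, assuming the delivery callback
// receives the same void* back:
//
//   Napi::Reference<Napi::Value>* ref =
//       static_cast<Napi::Reference<Napi::Value>*>(msg_opaque);
//   Napi::Value original = ref->Value();  // the JS value passed as info[5]
//   ref->Reset();
//   delete ref;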
   std::vector<RdKafka::Headers::Header> headers;
-  if (info.Length() > 6 && !info[6]->IsUndefined()) {
-    v8::Local<v8::Array> v8Headers = v8::Local<v8::Array>::Cast(info[6]);
+  if (info.Length() > 6 && !info[6].IsUndefined()) {
+    Napi::Array v8Headers = info[6].As<Napi::Array>();

-    if (v8Headers->Length() >= 1) {
-      for (unsigned int i = 0; i < v8Headers->Length(); i++) {
-        v8::Local<v8::Object> header = Nan::Get(v8Headers, i).ToLocalChecked()
-          ->ToObject(Nan::GetCurrentContext()).ToLocalChecked();
+    if (v8Headers.Length() >= 1) {
+      for (unsigned int i = 0; i < v8Headers.Length(); i++) {
+        Napi::Object header = v8Headers.Get(i).ToObject();
         if (header.IsEmpty()) {
           continue;
         }

-        v8::Local<v8::Array> props = header->GetOwnPropertyNames(
-          Nan::GetCurrentContext()).ToLocalChecked();
+        Napi::Array props = header.GetPropertyNames();
        // TODO: Other properties in the list of properties should not be
        // ignored, but they are. This is a bug, need to handle it either in JS
        // or here.
-        Nan::MaybeLocal<v8::String> v8Key =
-          Nan::To<v8::String>(Nan::Get(props, 0).ToLocalChecked());
+        Napi::Value v8Key = props.Get(uint32_t(0));

         // The key must be a string.
-        if (v8Key.IsEmpty()) {
-          Nan::ThrowError("Header key must be a string");
+        if (!v8Key.IsString()) {
+          Napi::Error::New(env, "Header key must be a string").ThrowAsJavaScriptException();
+          return env.Null();
         }

-        Nan::Utf8String uKey(v8Key.ToLocalChecked());
-        std::string key(*uKey);
+        std::string key = v8Key.As<Napi::String>().Utf8Value();

         // Valid types for the header are string or buffer.
         // Other types will throw an error.
-        v8::Local<v8::Value> v8Value =
-          Nan::Get(header, v8Key.ToLocalChecked()).ToLocalChecked();
+        Napi::Value v8Value = header.Get(v8Key);

-        if (node::Buffer::HasInstance(v8Value)) {
-          const char* value = node::Buffer::Data(v8Value);
-          const size_t value_len = node::Buffer::Length(v8Value);
+        if (v8Value.IsBuffer()) {
+          const char* value = v8Value.As<Napi::Buffer<char>>().Data();
+          const size_t value_len = v8Value.As<Napi::Buffer<char>>().Length();
           headers.push_back(RdKafka::Headers::Header(key, value, value_len));
-        } else if (v8Value->IsString()) {
-          Nan::Utf8String uValue(v8Value);
-          std::string value(*uValue);
+        } else if (v8Value.IsString()) {
+          std::string value = v8Value.As<Napi::String>().Utf8Value();
           headers.push_back(
             RdKafka::Headers::Header(key, value.c_str(), value.size()));
         } else {
-          Nan::ThrowError("Header value must be a string or buffer");
+          Napi::Error::New(env, "Header value must be a string or buffer").ThrowAsJavaScriptException();
+          return env.Null();
         }
       }
     }
@@ -645,9 +657,9 @@ NAN_METHOD(Producer::NodeProduce) {
   // Let the JS library throw if we need to so the error can be more rich
   int error_code;

-  if (info[0]->IsString()) {
+  if (info[0].IsString()) {
     // Get string pointer for this thing
-    Nan::Utf8String topicUTF8(Nan::To<v8::String>(info[0]).ToLocalChecked());
-    std::string topic_name(*topicUTF8);
+    std::string topic_name = info[0].As<Napi::String>().Utf8Value();

     RdKafka::Headers *rd_headers = RdKafka::Headers::create(headers);
@@ -661,7 +673,7 @@ NAN_METHOD(Producer::NodeProduce) {
     }
   } else {
     // First parameter is a topic OBJECT
-    Topic* topic = ObjectWrap::Unwrap<Topic>(info[0].As<v8::Object>());
+    Topic* topic = Napi::ObjectWrap<Topic>::Unwrap(info[0].As<Napi::Object>());

     // Unwrap it and turn it into an RdKafka::Topic*
     Baton topic_baton = topic->toRDKafkaTopic(producer);
@@ -670,7 +682,7 @@ NAN_METHOD(Producer::NodeProduce) {
       // Let the JS library throw if we need to so the error can be more rich
       error_code = static_cast<int>(topic_baton.err());

-      return info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+      return Napi::Number::New(env, error_code);
     }

     RdKafka::Topic* rd_topic = topic_baton.data<RdKafka::Topic*>();
@@ -689,8 +701,8 @@ NAN_METHOD(Producer::NodeProduce) {
       // be a delivery report for it, so we have to clean up the opaque
       // data now, if there was any.
-      Nan::Persistent<v8::Value> *persistent =
-        static_cast<Nan::Persistent<v8::Value> *>(opaque);
+      Napi::Reference<Napi::Value> *persistent =
+        static_cast<Napi::Reference<Napi::Value> *>(opaque);
       persistent->Reset();
       delete persistent;
     }
@@ -699,34 +711,36 @@ NAN_METHOD(Producer::NodeProduce) {
     if (key != NULL) {
       delete key;
     }

-  info.GetReturnValue().Set(Nan::New<v8::Number>(error_code));
+  return Napi::Number::New(env, error_code);
 }
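node::Buffer::Data and node::Buffer::Length have no direct counterparts in node-addon-api; the migrated code above reads payloads through Napi::Buffer<char> instead. A compact sketch of that mapping, handling null the way NodeProduce does (the helper name is illustrative, not part of this patch):

#include <napi.h>
#include <utility>

// Illustrative: pull (data, length) out of a JS Buffer argument.
static std::pair<void*, size_t> GetPayload(const Napi::Value& v) {
  if (v.IsNull()) {
    return {nullptr, 0};  // a null payload is allowed by produce()
  }
  Napi::Buffer<char> buf = v.As<Napi::Buffer<char>>();
  // This is a view into V8-owned memory; copy it if it must outlive the call.
  return {buf.Data(), buf.Length()};
}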
-NAN_METHOD(Producer::NodeSetPartitioner) {
-  Nan::HandleScope scope;
+Napi::Value Producer::NodeSetPartitioner(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 1 || !info[0]->IsFunction()) {
+  if (info.Length() < 1 || !info[0].IsFunction()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify a callback");
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
-  v8::Local<v8::Function> cb = info[0].As<v8::Function>();
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());
+  Napi::Function cb = info[0].As<Napi::Function>();
   producer->m_partitioner_cb.SetCallback(cb);
-  info.GetReturnValue().Set(Nan::True());
+  return Napi::Boolean::New(env, true);
 }
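SetCallback above ultimately needs to retain the JS function across calls; under node-addon-api that means storing a Napi::FunctionReference, since a bare Napi::Function is only valid inside the current handle scope. A sketch of the storage pattern (the class and member names are illustrative stand-ins for Callbacks::Partitioner, not part of this patch):

#include <napi.h>
#include <cstdint>

class PartitionerCallback {
 public:
  void SetCallback(Napi::Function cb) {
    m_cb = Napi::Persistent(cb);  // outlives the current handle scope
  }

  void Dispatch(Napi::Env env, int32_t partition) {
    if (!m_cb.IsEmpty()) {
      m_cb.Call({Napi::Number::New(env, partition)});
    }
  }

 private:
  Napi::FunctionReference m_cb;
};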
-NAN_METHOD(Producer::NodeConnect) {
-  Nan::HandleScope scope;
+Napi::Value Producer::NodeConnect(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 1 || !info[0]->IsFunction()) {
+  if (info.Length() < 1 || !info[0].IsFunction()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify a callback");
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

   // This needs to be offloaded to libuv
-  v8::Local<v8::Function> cb = info[0].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[0].As<Napi::Function>();
+  Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());

@@ -735,39 +749,41 @@ NAN_METHOD(Producer::NodeConnect) {
   // We will deactivate them if the connection fails.
   producer->ActivateDispatchers();

-  Nan::AsyncQueueWorker(new Workers::ProducerConnect(callback, producer));
+  (new Workers::ProducerConnect(callback, producer))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(Producer::NodePoll) {
-  Nan::HandleScope scope;
+Napi::Value Producer::NodePoll(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());

   if (!producer->IsConnected()) {
-    Nan::ThrowError("Producer is disconnected");
+    Napi::Error::New(env, "Producer is disconnected").ThrowAsJavaScriptException();
+    return env.Null();
   } else {
     producer->Poll();
-    info.GetReturnValue().Set(Nan::True());
+    return Napi::Boolean::New(env, true);
   }
 }

-NAN_METHOD(Producer::NodeSetPollInBackground) {
-  Nan::HandleScope scope;
-  if (info.Length() < 1 || !info[0]->IsBoolean()) {
+Napi::Value Producer::NodeSetPollInBackground(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);
+  if (info.Length() < 1 || !info[0].IsBoolean()) {
     // Just throw an exception
-    return Nan::ThrowError(
-      "Need to specify a boolean for setting or unsetting");
+    Napi::Error::New(env,
+      "Need to specify a boolean for setting or unsetting").ThrowAsJavaScriptException();
+    return env.Null();
   }
-  bool set = Nan::To<bool>(info[0]).FromJust();
+  bool set = info[0].As<Napi::Boolean>().Value();

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());
   Baton b = producer->SetPollInBackground(set);
   if (b.err() != RdKafka::ERR_NO_ERROR) {
-    return Nan::ThrowError(b.errstr().c_str());
+    Napi::Error::New(env, b.errstr().c_str()).ThrowAsJavaScriptException();
+    return env.Null();
   }
-  info.GetReturnValue().Set(b.ToObject());
+  return b.ToObject();
 }

 Baton Producer::Flush(int timeout_ms) {
@@ -787,156 +803,165 @@ Baton Producer::Flush(int timeout_ms) {
   return Baton(response_code);
 }

-NAN_METHOD(Producer::NodeFlush) {
-  Nan::HandleScope scope;
+Napi::Value Producer::NodeFlush(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) {
+  if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify a timeout and a callback");
+    Napi::Error::New(env, "Need to specify a timeout and a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  int timeout_ms = Nan::To<int>(info[0]).FromJust();
+  int timeout_ms = info[0].As<Napi::Number>().Int32Value();

-  v8::Local<v8::Function> cb = info[1].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[1].As<Napi::Function>();
+  Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());

-  Nan::AsyncQueueWorker(
-    new Workers::ProducerFlush(callback, producer, timeout_ms));
+  (new Workers::ProducerFlush(callback, producer, timeout_ms))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(Producer::NodeDisconnect) {
-  Nan::HandleScope scope;
+Napi::Value Producer::NodeDisconnect(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 1 || !info[0]->IsFunction()) {
+  if (info.Length() < 1 || !info[0].IsFunction()) {
     // Just throw an exception
-    return Nan::ThrowError("Need to specify a callback");
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  v8::Local<v8::Function> cb = info[0].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[0].As<Napi::Function>();
+  Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());

-  Nan::AsyncQueueWorker(new Workers::ProducerDisconnect(callback, producer));
+  (new Workers::ProducerDisconnect(callback, producer))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(Producer::NodeInitTransactions) {
-  Nan::HandleScope scope;
+Napi::Value Producer::NodeInitTransactions(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) {
-    return Nan::ThrowError("Need to specify a timeout and a callback");
+  if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) {
+    Napi::Error::New(env, "Need to specify a timeout and a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  int timeout_ms = Nan::To<int>(info[0]).FromJust();
+  int timeout_ms = info[0].As<Napi::Number>().Int32Value();

-  v8::Local<v8::Function> cb = info[1].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[1].As<Napi::Function>();
+  Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());

-  Nan::AsyncQueueWorker(
-    new Workers::ProducerInitTransactions(callback, producer, timeout_ms));
+  (new Workers::ProducerInitTransactions(callback, producer, timeout_ms))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(Producer::NodeBeginTransaction) {
-  Nan::HandleScope scope;
+Napi::Value Producer::NodeBeginTransaction(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 1 || !info[0]->IsFunction()) {
-    return Nan::ThrowError("Need to specify a callback");
+  if (info.Length() < 1 || !info[0].IsFunction()) {
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  v8::Local<v8::Function> cb = info[0].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[0].As<Napi::Function>();
+  Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());

-  Nan::AsyncQueueWorker(new Workers::ProducerBeginTransaction(callback, producer));  // NOLINT
+  (new Workers::ProducerBeginTransaction(callback, producer))->Queue();  // NOLINT

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(Producer::NodeCommitTransaction) {
-  Nan::HandleScope scope;
+Napi::Value Producer::NodeCommitTransaction(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) {
-    return Nan::ThrowError("Need to specify a timeout and a callback");
+  if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) {
+    Napi::Error::New(env, "Need to specify a timeout and a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  int timeout_ms = Nan::To<int>(info[0]).FromJust();
+  int timeout_ms = info[0].As<Napi::Number>().Int32Value();

-  v8::Local<v8::Function> cb = info[1].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[1].As<Napi::Function>();
+  Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());

-  Nan::AsyncQueueWorker(
-    new Workers::ProducerCommitTransaction(callback, producer, timeout_ms));
+  (new Workers::ProducerCommitTransaction(callback, producer, timeout_ms))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }
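The timeout-plus-callback guard recurs in every transactional binding here; once the migration settles, the repetition could be folded into a small helper along these lines (purely an illustrative suggestion, not something this patch introduces):

#include <napi.h>

// Illustrative helper: throws and returns false when the (number, function)
// arguments are missing, so call sites can simply 'return env.Null()'.
static bool RequireTimeoutAndCallback(const Napi::CallbackInfo& info) {
  if (info.Length() < 2 || !info[0].IsNumber() || !info[1].IsFunction()) {
    Napi::Error::New(info.Env(), "Need to specify a timeout and a callback")
        .ThrowAsJavaScriptException();
    return false;
  }
  return true;
}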
-NAN_METHOD(Producer::NodeAbortTransaction) {
-  Nan::HandleScope scope;
+Napi::Value Producer::NodeAbortTransaction(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

-  if (info.Length() < 2 || !info[1]->IsFunction() || !info[0]->IsNumber()) {
-    return Nan::ThrowError("Need to specify a timeout and a callback");
+  if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) {
+    Napi::Error::New(env, "Need to specify a timeout and a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  int timeout_ms = Nan::To<int>(info[0]).FromJust();
+  int timeout_ms = info[0].As<Napi::Number>().Int32Value();

-  v8::Local<v8::Function> cb = info[1].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+  Napi::Function cb = info[1].As<Napi::Function>();
+  Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());

-  Nan::AsyncQueueWorker(
-    new Workers::ProducerAbortTransaction(callback, producer, timeout_ms));
+  (new Workers::ProducerAbortTransaction(callback, producer, timeout_ms))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

-NAN_METHOD(Producer::NodeSendOffsetsToTransaction) {
-  Nan::HandleScope scope;
+Napi::Value Producer::NodeSendOffsetsToTransaction(const Napi::CallbackInfo& info) {
+  Napi::Env env = info.Env();
+  Napi::HandleScope scope(env);

   if (info.Length() < 4) {
-    return Nan::ThrowError(
-      "Need to specify offsets, consumer, timeout for 'send offsets to transaction', and callback");  // NOLINT
+    Napi::Error::New(env,
+      "Need to specify offsets, consumer, timeout for 'send offsets to transaction', and callback").ThrowAsJavaScriptException();  // NOLINT
+    return env.Null();
   }
-  if (!info[0]->IsArray()) {
-    return Nan::ThrowError(
-      "First argument to 'send offsets to transaction' has to be a consumer object");  // NOLINT
+  if (!info[0].IsArray()) {
+    Napi::Error::New(env,
      "First argument to 'send offsets to transaction' has to be a consumer object").ThrowAsJavaScriptException();  // NOLINT
+    return env.Null();
   }
-  if (!info[1]->IsObject()) {
-    Nan::ThrowError("Kafka consumer must be provided");
+  if (!info[1].IsObject()) {
+    Napi::Error::New(env, "Kafka consumer must be provided").ThrowAsJavaScriptException();
+    return env.Null();
   }
-  if (!info[2]->IsNumber()) {
-    Nan::ThrowError("Timeout must be provided");
+  if (!info[2].IsNumber()) {
+    Napi::Error::New(env, "Timeout must be provided").ThrowAsJavaScriptException();
+    return env.Null();
   }
-  if (!info[3]->IsFunction()) {
-    return Nan::ThrowError("Need to specify a callback");
+  if (!info[3].IsFunction()) {
+    Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException();
+    return env.Null();
   }

-  std::vector<RdKafka::TopicPartition*> toppars =
-    Conversion::TopicPartition::FromV8Array(info[0].As<v8::Array>());
+  std::vector<RdKafka::TopicPartition*> toppars =
+    Conversion::TopicPartition::FromV8Array(info[0].As<Napi::Array>());

   NodeKafka::KafkaConsumer* consumer =
-    ObjectWrap::Unwrap<KafkaConsumer>(info[1].As<v8::Object>());
-  int timeout_ms = Nan::To<int>(info[2]).FromJust();
-  v8::Local<v8::Function> cb = info[3].As<v8::Function>();
-  Nan::Callback *callback = new Nan::Callback(cb);
+    Napi::ObjectWrap<KafkaConsumer>::Unwrap(info[1].As<Napi::Object>());
+  int timeout_ms = info[2].As<Napi::Number>().Int32Value();
+  Napi::Function cb = info[3].As<Napi::Function>();
+  Napi::FunctionReference *callback = new Napi::FunctionReference(Napi::Persistent(cb));

-  Producer* producer = ObjectWrap::Unwrap<Producer>(info.This());
+  Producer* producer = Napi::ObjectWrap<Producer>::Unwrap(info.This().As<Napi::Object>());

-  Nan::AsyncQueueWorker(new Workers::ProducerSendOffsetsToTransaction(
-    callback, producer, toppars, consumer, timeout_ms));
+  (new Workers::ProducerSendOffsetsToTransaction(
+    callback, producer, toppars, consumer, timeout_ms))->Queue();

-  info.GetReturnValue().Set(Nan::Null());
+  return env.Null();
 }

 }  // namespace NodeKafka

diff --git a/src/producer.h b/src/producer.h
index 8df138e8..0a9c9374 100644
--- a/src/producer.h
+++ b/src/producer.h
@@ -10,8 +10,10 @@
 #ifndef SRC_PRODUCER_H_
 #define SRC_PRODUCER_H_

-#include <nan.h>
-#include <node_buffer.h>
+#include <napi.h>
+#include <uv.h>
+#include
+#include
 #include
 #include
 #include
@@ -27,7 +29,7 @@ namespace NodeKafka {

 class ProducerMessage {
  public:
-  explicit ProducerMessage(v8::Local<v8::Object>, NodeKafka::Topic*);
+  explicit ProducerMessage(Napi::Object, NodeKafka::Topic*);
   ~ProducerMessage();

   void* Payload();
@@ -49,8 +51,8 @@ class ProducerMessage {

 class Producer : public Connection {
  public:
-  static void Init(v8::Local<v8::Object>);
-  static v8::Local<v8::Object> NewInstance(v8::Local<v8::Value>);
+  static void Init(Napi::Object);
+  static Napi::Object NewInstance(Napi::Value);

   Baton Connect();
   void Disconnect();
@@ -81,7 +83,7 @@ class
Producer : public Connection { void DeactivateDispatchers(); void ConfigureCallback(const std::string& string_key, - const v8::Local& cb, bool add) override; + const Napi::Function& cb, bool add) override; Baton InitTransactions(int32_t timeout_ms); Baton BeginTransaction(); @@ -93,27 +95,27 @@ class Producer : public Connection { int timeout_ms); protected: - static Nan::Persistent constructor; - static void New(const Nan::FunctionCallbackInfo&); + static Napi::FunctionReference constructor; + static void New(const Napi::CallbackInfo&); Producer(Conf*, Conf*); ~Producer(); private: - static NAN_METHOD(NodeProduce); - static NAN_METHOD(NodeSetPartitioner); - static NAN_METHOD(NodeConnect); - static NAN_METHOD(NodeDisconnect); - static NAN_METHOD(NodePoll); - static NAN_METHOD(NodeSetPollInBackground); + static Napi::Value NodeProduce(const Napi::CallbackInfo& info); + static Napi::Value NodeSetPartitioner(const Napi::CallbackInfo& info); + static Napi::Value NodeConnect(const Napi::CallbackInfo& info); + static Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); + static Napi::Value NodePoll(const Napi::CallbackInfo& info); + static Napi::Value NodeSetPollInBackground(const Napi::CallbackInfo& info); #if RD_KAFKA_VERSION > 0x00090200 - static NAN_METHOD(NodeFlush); + static Napi::Value NodeFlush(const Napi::CallbackInfo& info); #endif - static NAN_METHOD(NodeInitTransactions); - static NAN_METHOD(NodeBeginTransaction); - static NAN_METHOD(NodeCommitTransaction); - static NAN_METHOD(NodeAbortTransaction); - static NAN_METHOD(NodeSendOffsetsToTransaction); + static Napi::Value NodeInitTransactions(const Napi::CallbackInfo& info); + static Napi::Value NodeBeginTransaction(const Napi::CallbackInfo& info); + static Napi::Value NodeCommitTransaction(const Napi::CallbackInfo& info); + static Napi::Value NodeAbortTransaction(const Napi::CallbackInfo& info); + static Napi::Value NodeSendOffsetsToTransaction(const Napi::CallbackInfo& info); Callbacks::Delivery m_dr_cb; Callbacks::Partitioner m_partitioner_cb; diff --git a/src/topic.cc b/src/topic.cc index 78653c41..d26f02a2 100644 --- a/src/topic.cc +++ b/src/topic.cc @@ -74,57 +74,63 @@ Baton offset_store (int32_t partition, int64_t offset) { */ -Nan::Persistent Topic::constructor; +Napi::FunctionReference Topic::constructor; -void Topic::Init(v8::Local exports) { - Nan::HandleScope scope; +void Topic::Init(Napi::Object exports) { + Napi::HandleScope scope(env); - v8::Local tpl = Nan::New(New); - tpl->SetClassName(Nan::New("Topic").ToLocalChecked()); - tpl->InstanceTemplate()->SetInternalFieldCount(1); + Napi::FunctionReference tpl = Napi::Function::New(env, New); + tpl->SetClassName(Napi::String::New(env, "Topic")); - Nan::SetPrototypeMethod(tpl, "name", NodeGetName); + + InstanceMethod("name", &NodeGetName), // connect. disconnect. resume. pause. 
get meta data - constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext())) - .ToLocalChecked()); + constructor.Reset((tpl->GetFunction(Napi::GetCurrentContext())) + ); - Nan::Set(exports, Nan::New("Topic").ToLocalChecked(), - tpl->GetFunction(Nan::GetCurrentContext()).ToLocalChecked()); + (exports).Set(Napi::String::New(env, "Topic"), + tpl->GetFunction(Napi::GetCurrentContext())); } -void Topic::New(const Nan::FunctionCallbackInfo& info) { +void Topic::New(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); if (!info.IsConstructCall()) { - return Nan::ThrowError("non-constructor invocation not supported"); + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + return env.Null(); } if (info.Length() < 1) { - return Nan::ThrowError("topic name is required"); + Napi::Error::New(env, "topic name is required").ThrowAsJavaScriptException(); + return env.Null(); } - if (!info[0]->IsString()) { - return Nan::ThrowError("Topic name must be a string"); + if (!info[0].IsString()) { + Napi::Error::New(env, "Topic name must be a string").ThrowAsJavaScriptException(); + return env.Null(); } RdKafka::Conf* config = NULL; - if (info.Length() >= 2 && !info[1]->IsUndefined() && !info[1]->IsNull()) { + if (info.Length() >= 2 && !info[1].IsUndefined() && !info[1].IsNull()) { // If they gave us two parameters, or the 3rd parameter is null or // undefined, we want to pass null in for the config std::string errstr; - if (!info[1]->IsObject()) { - return Nan::ThrowError("Configuration data must be specified"); + if (!info[1].IsObject()) { + Napi::Error::New(env, "Configuration data must be specified").ThrowAsJavaScriptException(); + return env.Null(); } - config = Conf::create(RdKafka::Conf::CONF_TOPIC, (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); // NOLINT + config = Conf::create(RdKafka::Conf::CONF_TOPIC, (info[1].ToObject(Napi::GetCurrentContext())), errstr); // NOLINT if (!config) { - return Nan::ThrowError(errstr.c_str()); + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return env.Null(); } } - Nan::Utf8String parameterValue(Nan::To(info[0]).ToLocalChecked()); + std::string parameterValue = info[0].As(.To()); std::string topic_name(*parameterValue); Topic* topic = new Topic(topic_name, config); @@ -136,37 +142,38 @@ void Topic::New(const Nan::FunctionCallbackInfo& info) { // basically it sets the configuration data // we don't need to do that because we lazy load it - info.GetReturnValue().Set(info.This()); + return info.This(); } // handle -v8::Local Topic::NewInstance(v8::Local arg) { - Nan::EscapableHandleScope scope; +Napi::Object Topic::NewInstance(Napi::Value arg) { + Napi::Env env = arg.Env(); + Napi::EscapableHandleScope scope(env); const unsigned argc = 1; - v8::Local argv[argc] = { arg }; - v8::Local cons = Nan::New(constructor); - v8::Local instance = - Nan::NewInstance(cons, argc, argv).ToLocalChecked(); + Napi::Value argv[argc] = { arg }; + Napi::Function cons = Napi::Function::New(env, constructor); + Napi::Object instance = + Napi::NewInstance(cons, argc, argv); return scope.Escape(instance); } -NAN_METHOD(Topic::NodeGetName) { - Nan::HandleScope scope; +Napi::Value Topic::NodeGetName(const Napi::CallbackInfo& info) { + Napi::HandleScope scope(env); Topic* topic = ObjectWrap::Unwrap(info.This()); - info.GetReturnValue().Set(Nan::New(topic->name()).ToLocalChecked()); + return Napi::New(env, topic->name()); } -NAN_METHOD(Topic::NodePartitionAvailable) { +Napi::Value 
Topic::NodePartitionAvailable(const Napi::CallbackInfo& info) { // @TODO(sparente) } -NAN_METHOD(Topic::NodeOffsetStore) { +Napi::Value Topic::NodeOffsetStore(const Napi::CallbackInfo& info) { // @TODO(sparente) } diff --git a/src/topic.h b/src/topic.h index d487d089..085e3800 100644 --- a/src/topic.h +++ b/src/topic.h @@ -10,7 +10,8 @@ #ifndef SRC_TOPIC_H_ #define SRC_TOPIC_H_ -#include +#include +#include #include #include "rdkafkacpp.h" // NOLINT @@ -19,18 +20,18 @@ namespace NodeKafka { -class Topic : public Nan::ObjectWrap { +class Topic : public Napi::ObjectWrap { public: - static void Init(v8::Local); - static v8::Local NewInstance(v8::Local arg); + static void Init(Napi::Object); + static Napi::Object NewInstance(Napi::Value arg); Baton toRDKafkaTopic(Connection *handle); protected: - static Nan::Persistent constructor; - static void New(const Nan::FunctionCallbackInfo& info); + static Napi::FunctionReference constructor; + static void New(const Napi::CallbackInfo& info); - static NAN_METHOD(NodeGetMetadata); + static Napi::Value NodeGetMetadata(const Napi::CallbackInfo& info); // TopicConfig * config_; @@ -44,9 +45,9 @@ class Topic : public Nan::ObjectWrap { std::string m_topic_name; RdKafka::Conf * m_config; - static NAN_METHOD(NodeGetName); - static NAN_METHOD(NodePartitionAvailable); - static NAN_METHOD(NodeOffsetStore); + static Napi::Value NodeGetName(const Napi::CallbackInfo& info); + static Napi::Value NodePartitionAvailable(const Napi::CallbackInfo& info); + static Napi::Value NodeOffsetStore(const Napi::CallbackInfo& info); }; } // namespace NodeKafka diff --git a/src/workers.cc b/src/workers.cc index 4655458d..cd03c529 100644 --- a/src/workers.cc +++ b/src/workers.cc @@ -36,7 +36,7 @@ namespace Handle { * @see RdKafka::KafkaConsumer::Committed */ -OffsetsForTimes::OffsetsForTimes(Nan::Callback *callback, +OffsetsForTimes::OffsetsForTimes(Napi::FunctionReference *callback, Connection* handle, std::vector & t, const int & timeout_ms) : @@ -57,30 +57,30 @@ void OffsetsForTimes::Execute() { } } -void OffsetsForTimes::HandleOKCallback() { - Nan::HandleScope scope; +void OffsetsForTimes::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::TopicPartition::ToV8Array(m_topic_partitions); callback->Call(argc, argv); } -void OffsetsForTimes::HandleErrorCallback() { - Nan::HandleScope scope; +void OffsetsForTimes::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } } // namespace Handle ConnectionMetadata::ConnectionMetadata( - Nan::Callback *callback, Connection* connection, + Napi::FunctionReference *callback, Connection* connection, std::string topic, int timeout_ms, bool all_topics) : ErrorAwareWorker(callback), m_connection(connection), @@ -104,13 +104,13 @@ void ConnectionMetadata::Execute() { } } -void ConnectionMetadata::HandleOKCallback() { - Nan::HandleScope scope; +void ConnectionMetadata::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; // This is a big one! 
- v8::Local argv[argc] = { Nan::Null(), + Napi::Value argv[argc] = { env.Null(), Conversion::Metadata::ToV8Object(m_metadata)}; callback->Call(argc, argv); @@ -118,11 +118,11 @@ void ConnectionMetadata::HandleOKCallback() { delete m_metadata; } -void ConnectionMetadata::HandleErrorCallback() { - Nan::HandleScope scope; +void ConnectionMetadata::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -130,14 +130,14 @@ void ConnectionMetadata::HandleErrorCallback() { /** * @brief Client query watermark offsets worker * - * Easy Nan::AsyncWorker for getting watermark offsets from a broker + * Easy Napi::AsyncWorker for getting watermark offsets from a broker * * @sa RdKafka::Handle::query_watermark_offsets * @sa NodeKafka::Connection::QueryWatermarkOffsets */ ConnectionQueryWatermarkOffsets::ConnectionQueryWatermarkOffsets( - Nan::Callback *callback, Connection* connection, + Napi::FunctionReference *callback, Connection* connection, std::string topic, int32_t partition, int timeout_ms) : ErrorAwareWorker(callback), m_connection(connection), @@ -157,28 +157,28 @@ void ConnectionQueryWatermarkOffsets::Execute() { } } -void ConnectionQueryWatermarkOffsets::HandleOKCallback() { - Nan::HandleScope scope; +void ConnectionQueryWatermarkOffsets::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local offsetsObj = Nan::New(); - Nan::Set(offsetsObj, Nan::New("lowOffset").ToLocalChecked(), - Nan::New(m_low_offset)); - Nan::Set(offsetsObj, Nan::New("highOffset").ToLocalChecked(), - Nan::New(m_high_offset)); + Napi::Object offsetsObj = Napi::Object::New(env); + (offsetsObj).Set(Napi::String::New(env, "lowOffset"), + Napi::Number::New(env, m_low_offset)); + (offsetsObj).Set(Napi::String::New(env, "highOffset"), + Napi::Number::New(env, m_high_offset)); // This is a big one! - v8::Local argv[argc] = { Nan::Null(), offsetsObj}; + Napi::Value argv[argc] = { env.Null(), offsetsObj}; callback->Call(argc, argv); } -void ConnectionQueryWatermarkOffsets::HandleErrorCallback() { - Nan::HandleScope scope; +void ConnectionQueryWatermarkOffsets::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -186,13 +186,13 @@ void ConnectionQueryWatermarkOffsets::HandleErrorCallback() { /** * @brief Producer connect worker. 
* - * Easy Nan::AsyncWorker for setting up client connections + * Easy Napi::AsyncWorker for setting up client connections * * @sa RdKafka::Producer::connect * @sa NodeKafka::Producer::Connect */ -ProducerConnect::ProducerConnect(Nan::Callback *callback, Producer* producer): +ProducerConnect::ProducerConnect(Napi::FunctionReference *callback, Producer* producer): ErrorAwareWorker(callback), producer(producer) {} @@ -206,27 +206,27 @@ void ProducerConnect::Execute() { } } -void ProducerConnect::HandleOKCallback() { - Nan::HandleScope scope; +void ProducerConnect::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local obj = Nan::New(); - Nan::Set(obj, Nan::New("name").ToLocalChecked(), - Nan::New(producer->Name()).ToLocalChecked()); + Napi::Object obj = Napi::Object::New(env); + (obj).Set(Napi::String::New(env, "name"), + Napi::New(env, producer->Name())); - v8::Local argv[argc] = { Nan::Null(), obj}; + Napi::Value argv[argc] = { env.Null(), obj}; callback->Call(argc, argv); } -void ProducerConnect::HandleErrorCallback() { - Nan::HandleScope scope; +void ProducerConnect::OnError() { + Napi::HandleScope scope(env); producer->DeactivateDispatchers(); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -234,10 +234,10 @@ void ProducerConnect::HandleErrorCallback() { /** * @brief Producer disconnect worker * - * Easy Nan::AsyncWorker for disconnecting from clients + * Easy Napi::AsyncWorker for disconnecting from clients */ -ProducerDisconnect::ProducerDisconnect(Nan::Callback *callback, +ProducerDisconnect::ProducerDisconnect(Napi::FunctionReference *callback, Producer* producer): ErrorAwareWorker(callback), producer(producer) {} @@ -248,11 +248,11 @@ void ProducerDisconnect::Execute() { producer->Disconnect(); } -void ProducerDisconnect::HandleOKCallback() { - Nan::HandleScope scope; +void ProducerDisconnect::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc] = { Nan::Null(), Nan::True()}; + Napi::Value argv[argc] = { env.Null(), env.True()}; // Deactivate the dispatchers producer->DeactivateDispatchers(); @@ -260,7 +260,7 @@ void ProducerDisconnect::HandleOKCallback() { callback->Call(argc, argv); } -void ProducerDisconnect::HandleErrorCallback() { +void ProducerDisconnect::OnError() { // This should never run assert(0); } @@ -268,10 +268,10 @@ void ProducerDisconnect::HandleErrorCallback() { /** * @brief Producer flush worker * - * Easy Nan::AsyncWorker for flushing a producer. + * Easy Napi::AsyncWorker for flushing a producer. */ -ProducerFlush::ProducerFlush(Nan::Callback *callback, +ProducerFlush::ProducerFlush(Napi::FunctionReference *callback, Producer* producer, int timeout_ms): ErrorAwareWorker(callback), producer(producer), @@ -291,11 +291,11 @@ void ProducerFlush::Execute() { } } -void ProducerFlush::HandleOKCallback() { - Nan::HandleScope scope; +void ProducerFlush::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { Nan::Null() }; + Napi::Value argv[argc] = { env.Null() }; callback->Call(argc, argv); } @@ -303,13 +303,13 @@ void ProducerFlush::HandleOKCallback() { /** * @brief Producer init transactions worker. 
* - * Easy Nan::AsyncWorker for initiating transactions + * Easy Napi::AsyncWorker for initiating transactions * * @sa RdKafka::Producer::init_transactions * @sa NodeKafka::Producer::InitTransactions */ -ProducerInitTransactions::ProducerInitTransactions(Nan::Callback *callback, +ProducerInitTransactions::ProducerInitTransactions(Napi::FunctionReference *callback, Producer* producer, const int & timeout_ms): ErrorAwareWorker(callback), producer(producer), @@ -325,11 +325,11 @@ void ProducerInitTransactions::Execute() { } } -void ProducerInitTransactions::HandleOKCallback() { - Nan::HandleScope scope; +void ProducerInitTransactions::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { Nan::Null() }; + Napi::Value argv[argc] = { env.Null() }; // Activate the dispatchers producer->ActivateDispatchers(); @@ -337,11 +337,11 @@ void ProducerInitTransactions::HandleOKCallback() { callback->Call(argc, argv); } -void ProducerInitTransactions::HandleErrorCallback() { - Nan::HandleScope scope; +void ProducerInitTransactions::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { m_baton.ToTxnObject() }; + Napi::Value argv[argc] = { m_baton.ToTxnObject() }; callback->Call(argc, argv); } @@ -349,13 +349,13 @@ void ProducerInitTransactions::HandleErrorCallback() { /** * @brief Producer begin transaction worker. * - * Easy Nan::AsyncWorker for begin transaction + * Easy Napi::AsyncWorker for begin transaction * * @sa RdKafka::Producer::begin_transaction * @sa NodeKafka::Producer::BeginTransaction */ -ProducerBeginTransaction::ProducerBeginTransaction(Nan::Callback* callback, +ProducerBeginTransaction::ProducerBeginTransaction(Napi::FunctionReference* callback, Producer* producer) : ErrorAwareWorker(callback), producer(producer) {} @@ -369,12 +369,12 @@ void ProducerBeginTransaction::Execute() { } } -void ProducerBeginTransaction::HandleOKCallback() { - Nan::HandleScope scope; +void ProducerBeginTransaction::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { Nan::Null() }; + Napi::Value argv[argc] = { env.Null() }; // Activate the dispatchers producer->ActivateDispatchers(); @@ -382,11 +382,11 @@ void ProducerBeginTransaction::HandleOKCallback() { callback->Call(argc, argv); } -void ProducerBeginTransaction::HandleErrorCallback() { - Nan::HandleScope scope; +void ProducerBeginTransaction::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -394,13 +394,13 @@ void ProducerBeginTransaction::HandleErrorCallback() { /** * @brief Producer commit transaction worker. 
* - * Easy Nan::AsyncWorker for committing transaction + * Easy Napi::AsyncWorker for committing transaction * * @sa RdKafka::Producer::commit_transaction * @sa NodeKafka::Producer::CommitTransaction */ -ProducerCommitTransaction::ProducerCommitTransaction(Nan::Callback *callback, +ProducerCommitTransaction::ProducerCommitTransaction(Napi::FunctionReference *callback, Producer* producer, const int & timeout_ms): ErrorAwareWorker(callback), producer(producer), @@ -416,11 +416,11 @@ void ProducerCommitTransaction::Execute() { } } -void ProducerCommitTransaction::HandleOKCallback() { - Nan::HandleScope scope; +void ProducerCommitTransaction::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { Nan::Null() }; + Napi::Value argv[argc] = { env.Null() }; // Activate the dispatchers producer->ActivateDispatchers(); @@ -428,11 +428,11 @@ void ProducerCommitTransaction::HandleOKCallback() { callback->Call(argc, argv); } -void ProducerCommitTransaction::HandleErrorCallback() { - Nan::HandleScope scope; +void ProducerCommitTransaction::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { m_baton.ToTxnObject() }; + Napi::Value argv[argc] = { m_baton.ToTxnObject() }; callback->Call(argc, argv); } @@ -440,13 +440,13 @@ void ProducerCommitTransaction::HandleErrorCallback() { /** * @brief Producer abort transaction worker. * - * Easy Nan::AsyncWorker for aborting transaction + * Easy Napi::AsyncWorker for aborting transaction * * @sa RdKafka::Producer::abort_transaction * @sa NodeKafka::Producer::AbortTransaction */ -ProducerAbortTransaction::ProducerAbortTransaction(Nan::Callback *callback, +ProducerAbortTransaction::ProducerAbortTransaction(Napi::FunctionReference *callback, Producer* producer, const int & timeout_ms): ErrorAwareWorker(callback), producer(producer), @@ -462,11 +462,11 @@ void ProducerAbortTransaction::Execute() { } } -void ProducerAbortTransaction::HandleOKCallback() { - Nan::HandleScope scope; +void ProducerAbortTransaction::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { Nan::Null() }; + Napi::Value argv[argc] = { env.Null() }; // Activate the dispatchers producer->ActivateDispatchers(); @@ -474,11 +474,11 @@ void ProducerAbortTransaction::HandleOKCallback() { callback->Call(argc, argv); } -void ProducerAbortTransaction::HandleErrorCallback() { - Nan::HandleScope scope; +void ProducerAbortTransaction::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { m_baton.ToTxnObject() }; + Napi::Value argv[argc] = { m_baton.ToTxnObject() }; callback->Call(argc, argv); } @@ -486,14 +486,14 @@ void ProducerAbortTransaction::HandleErrorCallback() { /** * @brief Producer SendOffsetsToTransaction worker. 
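 *
 * Execute() for this worker bottoms out in librdkafka's
 * RdKafka::Producer::send_offsets_to_transaction(), which takes the
 * offsets, the consumer's group metadata and a timeout. Sketched
 * against the public librdkafka C++ API:
 *
 *   RdKafka::ConsumerGroupMetadata* md = consumer->groupMetadata();
 *   RdKafka::Error* err =
 *       producer->send_offsets_to_transaction(offsets, md, timeout_ms);
 *   delete md;
 *   if (err != NULL) {
 *     // err->is_retriable() / err->txn_requires_abort() drive the
 *     // baton flags surfaced to JS; the caller must delete err
 *   }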
* - * Easy Nan::AsyncWorker for SendOffsetsToTransaction + * Easy Napi::AsyncWorker for SendOffsetsToTransaction * * @sa RdKafka::Producer::send_offsets_to_transaction * @sa NodeKafka::Producer::SendOffsetsToTransaction */ ProducerSendOffsetsToTransaction::ProducerSendOffsetsToTransaction( - Nan::Callback *callback, + Napi::FunctionReference *callback, Producer* producer, std::vector & t, KafkaConsumer* consumer, @@ -515,11 +515,11 @@ void ProducerSendOffsetsToTransaction::Execute() { } } -void ProducerSendOffsetsToTransaction::HandleOKCallback() { - Nan::HandleScope scope; +void ProducerSendOffsetsToTransaction::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { Nan::Null() }; + Napi::Value argv[argc] = { env.Null() }; // Activate the dispatchers producer->ActivateDispatchers(); @@ -527,11 +527,11 @@ void ProducerSendOffsetsToTransaction::HandleOKCallback() { callback->Call(argc, argv); } -void ProducerSendOffsetsToTransaction::HandleErrorCallback() { - Nan::HandleScope scope; +void ProducerSendOffsetsToTransaction::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { m_baton.ToTxnObject() }; + Napi::Value argv[argc] = { m_baton.ToTxnObject() }; callback->Call(argc, argv); } @@ -539,13 +539,13 @@ void ProducerSendOffsetsToTransaction::HandleErrorCallback() { /** * @brief KafkaConsumer connect worker. * - * Easy Nan::AsyncWorker for setting up client connections + * Easy Napi::AsyncWorker for setting up client connections * * @sa RdKafka::KafkaConsumer::connect * @sa NodeKafka::KafkaConsumer::Connect */ -KafkaConsumerConnect::KafkaConsumerConnect(Nan::Callback *callback, +KafkaConsumerConnect::KafkaConsumerConnect(Napi::FunctionReference *callback, KafkaConsumer* consumer): ErrorAwareWorker(callback), consumer(consumer) {} @@ -561,28 +561,28 @@ void KafkaConsumerConnect::Execute() { } } -void KafkaConsumerConnect::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerConnect::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; // Create the object - v8::Local obj = Nan::New(); - Nan::Set(obj, Nan::New("name").ToLocalChecked(), - Nan::New(consumer->Name()).ToLocalChecked()); + Napi::Object obj = Napi::Object::New(env); + (obj).Set(Napi::String::New(env, "name"), + Napi::New(env, consumer->Name())); - v8::Local argv[argc] = { Nan::Null(), obj }; + Napi::Value argv[argc] = { env.Null(), obj }; callback->Call(argc, argv); } -void KafkaConsumerConnect::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerConnect::OnError() { + Napi::HandleScope scope(env); consumer->DeactivateDispatchers(); const unsigned int argc = 1; - v8::Local argv[argc] = { Nan::Error(ErrorMessage()) }; + Napi::Value argv[argc] = { Napi::Error::New(env, ErrorMessage()) }; callback->Call(argc, argv); } @@ -590,13 +590,13 @@ void KafkaConsumerConnect::HandleErrorCallback() { /** * @brief KafkaConsumer disconnect worker. 
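 *
 * (On the connect error path just above: Napi::Error derives from
 * Napi::ObjectReference, not Napi::Value, so it cannot be placed in a
 * Napi::Value argv[] as generated; the conversion is still needed,
 * e.g.
 *
 *   Napi::Value argv[1] =
 *       { Napi::Error::New(env, ErrorMessage()).Value() };
 *
 * Likewise the env.True() produced for the disconnect workers is not a
 * Napi::Env member; booleans come from Napi::Boolean::New(env, true).)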
* - * Easy Nan::AsyncWorker for disconnecting and cleaning up librdkafka artifacts + * Easy Napi::AsyncWorker for disconnecting and cleaning up librdkafka artifacts * * @sa RdKafka::KafkaConsumer::disconnect * @sa NodeKafka::KafkaConsumer::Disconnect */ -KafkaConsumerDisconnect::KafkaConsumerDisconnect(Nan::Callback *callback, +KafkaConsumerDisconnect::KafkaConsumerDisconnect(Napi::FunctionReference *callback, KafkaConsumer* consumer): ErrorAwareWorker(callback), consumer(consumer) {} @@ -611,22 +611,22 @@ void KafkaConsumerDisconnect::Execute() { } } -void KafkaConsumerDisconnect::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerDisconnect::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc] = { Nan::Null(), Nan::True() }; + Napi::Value argv[argc] = { env.Null(), env.True() }; consumer->DeactivateDispatchers(); callback->Call(argc, argv); } -void KafkaConsumerDisconnect::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerDisconnect::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; consumer->DeactivateDispatchers(); @@ -636,7 +636,7 @@ void KafkaConsumerDisconnect::HandleErrorCallback() { /** * @brief KafkaConsumer get messages worker. * - * A more complex Nan::AsyncProgressWorker. I made a custom superclass to deal + * A more complex Napi::AsyncProgressWorker. I made a custom superclass to deal * with more real-time progress points. Instead of using ProgressWorker, which * is not time sensitive, this custom worker will poll using libuv and send * data back to v8 as it comes available without missing any @@ -654,7 +654,7 @@ void KafkaConsumerDisconnect::HandleErrorCallback() { * @sa NodeKafka::KafkaConsumer::GetMessage */ -KafkaConsumerConsumeLoop::KafkaConsumerConsumeLoop(Nan::Callback *callback, +KafkaConsumerConsumeLoop::KafkaConsumerConsumeLoop(Napi::FunctionReference *callback, KafkaConsumer* consumer, const int & timeout_ms, const int & timeout_sleep_delay_ms) : @@ -731,36 +731,36 @@ void KafkaConsumerConsumeLoop::ConsumeLoop(void* arg) { void KafkaConsumerConsumeLoop::HandleMessageCallback(RdKafka::Message* msg, RdKafka::ErrorCode ec) { - Nan::HandleScope scope; + Napi::HandleScope scope(env); const unsigned int argc = 4; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); if (msg == NULL) { - argv[1] = Nan::Null(); - argv[2] = Nan::Null(); - argv[3] = Nan::New(ec); + argv[1] = env.Null(); + argv[2] = env.Null(); + argv[3] = Napi::Number::New(env, ec); } else { - argv[3] = Nan::Null(); + argv[3] = env.Null(); switch (msg->err()) { case RdKafka::ERR__PARTITION_EOF: { - argv[1] = Nan::Null(); - v8::Local eofEvent = Nan::New(); + argv[1] = env.Null(); + Napi::Object eofEvent = Napi::Object::New(env); - Nan::Set(eofEvent, Nan::New("topic").ToLocalChecked(), - Nan::New(msg->topic_name()).ToLocalChecked()); - Nan::Set(eofEvent, Nan::New("offset").ToLocalChecked(), - Nan::New(msg->offset())); - Nan::Set(eofEvent, Nan::New("partition").ToLocalChecked(), - Nan::New(msg->partition())); + (eofEvent).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, msg->topic_name())); + (eofEvent).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, msg->offset())); + (eofEvent).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, msg->partition())); argv[2] = eofEvent; break; } default: argv[1] = Conversion::Message::ToV8Object(msg); - 
argv[2] = Nan::Null(); + argv[2] = env.Null(); break; } @@ -771,15 +771,15 @@ void KafkaConsumerConsumeLoop::HandleMessageCallback(RdKafka::Message* msg, callback->Call(argc, argv); } -void KafkaConsumerConsumeLoop::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsumeLoop::OnOK() { + Napi::HandleScope scope(env); } -void KafkaConsumerConsumeLoop::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsumeLoop::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { Nan::Error(ErrorMessage()) }; + Napi::Value argv[argc] = { Napi::Error::New(env, ErrorMessage()) }; callback->Call(argc, argv); } @@ -795,7 +795,7 @@ void KafkaConsumerConsumeLoop::HandleErrorCallback() { * @see NodeKafka::KafkaConsumer::GetMessage */ -KafkaConsumerConsumeNum::KafkaConsumerConsumeNum(Nan::Callback *callback, +KafkaConsumerConsumeNum::KafkaConsumerConsumeNum(Napi::FunctionReference *callback, KafkaConsumer* consumer, const uint32_t & num_messages, const int & timeout_ms, @@ -870,14 +870,14 @@ void KafkaConsumerConsumeNum::Execute() { } } -void KafkaConsumerConsumeNum::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsumeNum::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 3; - v8::Local argv[argc]; - argv[0] = Nan::Null(); + Napi::Value argv[argc]; + argv[0] = env.Null(); - v8::Local returnArray = Nan::New(); - v8::Local eofEventsArray = Nan::New(); + Napi::Array returnArray = Napi::Array::New(env); + Napi::Array eofEventsArray = Napi::Array::New(env); if (m_messages.size() > 0) { int returnArrayIndex = -1; @@ -889,30 +889,29 @@ void KafkaConsumerConsumeNum::HandleOKCallback() { switch (message->err()) { case RdKafka::ERR_NO_ERROR: ++returnArrayIndex; - Nan::Set(returnArray, returnArrayIndex, + (returnArray).Set(returnArrayIndex, Conversion::Message::ToV8Object(message)); break; case RdKafka::ERR__PARTITION_EOF: ++eofEventsArrayIndex; // create EOF event - v8::Local eofEvent = Nan::New(); + Napi::Object eofEvent = Napi::Object::New(env); - Nan::Set(eofEvent, Nan::New("topic").ToLocalChecked(), - Nan::New(message->topic_name()).ToLocalChecked()); - Nan::Set(eofEvent, Nan::New("offset").ToLocalChecked(), - Nan::New(message->offset())); - Nan::Set(eofEvent, Nan::New("partition").ToLocalChecked(), - Nan::New(message->partition())); + (eofEvent).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, message->topic_name())); + (eofEvent).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, message->offset())); + (eofEvent).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, message->partition())); // also store index at which position in the message array this event // was emitted this way, we can later emit it at the right point in // time - Nan::Set(eofEvent, - Nan::New("messageIndex").ToLocalChecked(), - Nan::New(returnArrayIndex)); + (eofEvent).Set(Napi::String::New(env, "messageIndex"), + Napi::Number::New(env, returnArrayIndex)); - Nan::Set(eofEventsArray, eofEventsArrayIndex, eofEvent); + (eofEventsArray).Set(eofEventsArrayIndex, eofEvent); } delete message; @@ -925,8 +924,8 @@ void KafkaConsumerConsumeNum::HandleOKCallback() { callback->Call(argc, argv); } -void KafkaConsumerConsumeNum::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsumeNum::OnError() { + Napi::HandleScope scope(env); if (m_messages.size() > 0) { for (std::vector::iterator it = m_messages.begin(); @@ -937,7 +936,7 @@ void KafkaConsumerConsumeNum::HandleErrorCallback() { 
} const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -953,7 +952,7 @@ void KafkaConsumerConsumeNum::HandleErrorCallback() { * @see NodeKafka::KafkaConsumer::GetMessage */ -KafkaConsumerConsume::KafkaConsumerConsume(Nan::Callback *callback, +KafkaConsumerConsume::KafkaConsumerConsume(Napi::FunctionReference *callback, KafkaConsumer* consumer, const int & timeout_ms) : ErrorAwareWorker(callback), @@ -977,13 +976,13 @@ void KafkaConsumerConsume::Execute() { } } -void KafkaConsumerConsume::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsume::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Message::ToV8Object(m_message); delete m_message; @@ -991,11 +990,11 @@ void KafkaConsumerConsume::HandleOKCallback() { callback->Call(argc, argv); } -void KafkaConsumerConsume::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerConsume::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1010,7 +1009,7 @@ void KafkaConsumerConsume::HandleErrorCallback() { * @see RdKafka::KafkaConsumer::Committed */ -KafkaConsumerCommitted::KafkaConsumerCommitted(Nan::Callback *callback, +KafkaConsumerCommitted::KafkaConsumerCommitted(Napi::FunctionReference *callback, KafkaConsumer* consumer, std::vector & t, const int & timeout_ms) : @@ -1031,23 +1030,23 @@ void KafkaConsumerCommitted::Execute() { } } -void KafkaConsumerCommitted::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerCommitted::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::TopicPartition::ToV8Array(m_topic_partitions); callback->Call(argc, argv); } -void KafkaConsumerCommitted::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerCommitted::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1059,7 +1058,7 @@ void KafkaConsumerCommitted::HandleErrorCallback() { * * @see RdKafka::KafkaConsumer::commitSync */ -KafkaConsumerCommitCb::KafkaConsumerCommitCb(Nan::Callback *callback, +KafkaConsumerCommitCb::KafkaConsumerCommitCb(Napi::FunctionReference *callback, KafkaConsumer* consumer, std::optional> & t) : ErrorAwareWorker(callback), @@ -1084,22 +1083,22 @@ void KafkaConsumerCommitCb::Execute() { } } -void KafkaConsumerCommitCb::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerCommitCb::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); callback->Call(argc, argv); } -void KafkaConsumerCommitCb::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerCommitCb::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1116,7 +1115,7 @@ void KafkaConsumerCommitCb::HandleErrorCallback() { * seek to work. 
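 * (What Execute() performs, sketched against the
 * RdKafka::KafkaConsumer API with illustrative topic/partition/offset
 * values:
 *
 *   RdKafka::TopicPartition* tp =
 *       RdKafka::TopicPartition::create("my-topic", 0, 42);
 *   RdKafka::ErrorCode ec = consumer->seek(*tp, timeout_ms);
 *   delete tp;
 *
 * A timeout_ms of 0 makes the seek asynchronous.)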
Use assign() to set the starting offset. */ -KafkaConsumerSeek::KafkaConsumerSeek(Nan::Callback *callback, +KafkaConsumerSeek::KafkaConsumerSeek(Napi::FunctionReference *callback, KafkaConsumer* consumer, const RdKafka::TopicPartition * toppar, const int & timeout_ms) : @@ -1146,22 +1145,22 @@ void KafkaConsumerSeek::Execute() { } } -void KafkaConsumerSeek::HandleOKCallback() { - Nan::HandleScope scope; +void KafkaConsumerSeek::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); callback->Call(argc, argv); } -void KafkaConsumerSeek::HandleErrorCallback() { - Nan::HandleScope scope; +void KafkaConsumerSeek::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1172,7 +1171,7 @@ void KafkaConsumerSeek::HandleErrorCallback() { * This callback will create a topic * */ -AdminClientCreateTopic::AdminClientCreateTopic(Nan::Callback *callback, +AdminClientCreateTopic::AdminClientCreateTopic(Napi::FunctionReference *callback, AdminClient* client, rd_kafka_NewTopic_t* topic, const int & timeout_ms) : @@ -1193,22 +1192,22 @@ void AdminClientCreateTopic::Execute() { } } -void AdminClientCreateTopic::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientCreateTopic::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); callback->Call(argc, argv); } -void AdminClientCreateTopic::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientCreateTopic::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1219,7 +1218,7 @@ void AdminClientCreateTopic::HandleErrorCallback() { * This callback will delete a topic * */ -AdminClientDeleteTopic::AdminClientDeleteTopic(Nan::Callback *callback, +AdminClientDeleteTopic::AdminClientDeleteTopic(Napi::FunctionReference *callback, AdminClient* client, rd_kafka_DeleteTopic_t* topic, const int & timeout_ms) : @@ -1240,22 +1239,22 @@ void AdminClientDeleteTopic::Execute() { } } -void AdminClientDeleteTopic::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientDeleteTopic::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); callback->Call(argc, argv); } -void AdminClientDeleteTopic::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientDeleteTopic::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1267,7 +1266,7 @@ void AdminClientDeleteTopic::HandleErrorCallback() { * */ AdminClientCreatePartitions::AdminClientCreatePartitions( - Nan::Callback *callback, + Napi::FunctionReference *callback, AdminClient* client, rd_kafka_NewPartitions_t* partitions, const int & timeout_ms) : @@ -1288,22 +1287,22 @@ void AdminClientCreatePartitions::Execute() { } } -void AdminClientCreatePartitions::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientCreatePartitions::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local 
argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); callback->Call(argc, argv); } -void AdminClientCreatePartitions::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientCreatePartitions::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = { GetErrorObject() }; + Napi::Value argv[argc] = { GetErrorObject() }; callback->Call(argc, argv); } @@ -1315,7 +1314,7 @@ void AdminClientCreatePartitions::HandleErrorCallback() { * */ AdminClientListGroups::AdminClientListGroups( - Nan::Callback* callback, AdminClient* client, bool is_match_states_set, + Napi::FunctionReference* callback, AdminClient* client, bool is_match_states_set, std::vector& match_states, const int& timeout_ms) : ErrorAwareWorker(callback), @@ -1338,13 +1337,13 @@ void AdminClientListGroups::Execute() { } } -void AdminClientListGroups::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientListGroups::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); const rd_kafka_ListConsumerGroups_result_t* result = rd_kafka_event_ListConsumerGroups_result(m_event_response); @@ -1354,11 +1353,11 @@ void AdminClientListGroups::HandleOKCallback() { callback->Call(argc, argv); } -void AdminClientListGroups::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientListGroups::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } @@ -1370,7 +1369,7 @@ void AdminClientListGroups::HandleErrorCallback() { * */ AdminClientDescribeGroups::AdminClientDescribeGroups( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, std::vector& groups, bool include_authorized_operations, const int& timeout_ms) : ErrorAwareWorker(callback), @@ -1393,24 +1392,24 @@ void AdminClientDescribeGroups::Execute() { } } -void AdminClientDescribeGroups::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientDescribeGroups::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromDescribeConsumerGroupsResult( rd_kafka_event_DescribeConsumerGroups_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientDescribeGroups::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientDescribeGroups::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } @@ -1422,7 +1421,7 @@ void AdminClientDescribeGroups::HandleErrorCallback() { * */ AdminClientDeleteGroups::AdminClientDeleteGroups( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, rd_kafka_DeleteGroup_t **group_list, size_t group_cnt, const int& timeout_ms) @@ -1451,24 +1450,24 @@ void AdminClientDeleteGroups::Execute() { } } -void AdminClientDeleteGroups::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientDeleteGroups::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] 
= Conversion::Admin::FromDeleteGroupsResult( rd_kafka_event_DeleteGroups_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientDeleteGroups::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientDeleteGroups::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } @@ -1481,7 +1480,7 @@ void AdminClientDeleteGroups::HandleErrorCallback() { * */ AdminClientListConsumerGroupOffsets::AdminClientListConsumerGroupOffsets( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, rd_kafka_ListConsumerGroupOffsets_t **req, size_t req_cnt, const bool require_stable_offsets, @@ -1513,23 +1512,23 @@ void AdminClientListConsumerGroupOffsets::Execute() { } } -void AdminClientListConsumerGroupOffsets::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientListConsumerGroupOffsets::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromListConsumerGroupOffsetsResult( rd_kafka_event_ListConsumerGroupOffsets_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientListConsumerGroupOffsets::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientListConsumerGroupOffsets::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } @@ -1542,7 +1541,7 @@ void AdminClientListConsumerGroupOffsets::HandleErrorCallback() { * */ AdminClientDeleteRecords::AdminClientDeleteRecords( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, rd_kafka_DeleteRecords_t **del_records, size_t del_records_cnt, const int& operation_timeout_ms, @@ -1574,24 +1573,24 @@ void AdminClientDeleteRecords::Execute() { } } -void AdminClientDeleteRecords::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientDeleteRecords::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromDeleteRecordsResult( rd_kafka_event_DeleteRecords_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientDeleteRecords::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientDeleteRecords::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } @@ -1602,7 +1601,7 @@ void AdminClientDeleteRecords::HandleErrorCallback() { * This callback will describe topics. 
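 * The rd_kafka_TopicCollection_t handed to this worker is built from
 * plain topic names before the worker is queued and must be destroyed
 * once the result arrives. Sketch (topic names illustrative):
 *
 *   const char* names[] = { "orders", "payments" };
 *   rd_kafka_TopicCollection_t* topics =
 *       rd_kafka_TopicCollection_of_topic_names(names, 2);
 *   // ... run DescribeTopics with this collection ...
 *   rd_kafka_TopicCollection_destroy(topics);
 *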
*/ AdminClientDescribeTopics::AdminClientDescribeTopics( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, rd_kafka_TopicCollection_t* topics, const bool include_authorized_operations, const int& timeout_ms) @@ -1630,23 +1629,23 @@ void AdminClientDescribeTopics::Execute() { } } -void AdminClientDescribeTopics::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientDescribeTopics::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromDescribeTopicsResult( rd_kafka_event_DescribeTopics_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientDescribeTopics::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientDescribeTopics::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } @@ -1657,7 +1656,7 @@ void AdminClientDescribeTopics::HandleErrorCallback() { * This callback will list requested offsets for the specified topic partitions. */ AdminClientListOffsets::AdminClientListOffsets( - Nan::Callback* callback, NodeKafka::AdminClient* client, + Napi::FunctionReference* callback, NodeKafka::AdminClient* client, rd_kafka_topic_partition_list_t* partitions, const int& timeout_ms, rd_kafka_IsolationLevel_t isolation_level) : ErrorAwareWorker(callback), @@ -1684,23 +1683,23 @@ void AdminClientListOffsets::Execute() { } } -void AdminClientListOffsets::HandleOKCallback() { - Nan::HandleScope scope; +void AdminClientListOffsets::OnOK() { + Napi::HandleScope scope(env); const unsigned int argc = 2; - v8::Local argv[argc]; + Napi::Value argv[argc]; - argv[0] = Nan::Null(); + argv[0] = env.Null(); argv[1] = Conversion::Admin::FromListOffsetsResult( rd_kafka_event_ListOffsets_result(m_event_response)); callback->Call(argc, argv); } -void AdminClientListOffsets::HandleErrorCallback() { - Nan::HandleScope scope; +void AdminClientListOffsets::OnError() { + Napi::HandleScope scope(env); const unsigned int argc = 1; - v8::Local argv[argc] = {GetErrorObject()}; + Napi::Value argv[argc] = {GetErrorObject()}; callback->Call(argc, argv); } diff --git a/src/workers.h b/src/workers.h index b9583823..9fad8c20 100644 --- a/src/workers.h +++ b/src/workers.h @@ -12,7 +12,8 @@ #define SRC_WORKERS_H_ #include -#include +#include +#include #include #include #include @@ -26,25 +27,25 @@ namespace NodeKafka { namespace Workers { -class ErrorAwareWorker : public Nan::AsyncWorker { +class ErrorAwareWorker : public Napi::AsyncWorker { public: - explicit ErrorAwareWorker(Nan::Callback* callback_) : - Nan::AsyncWorker(callback_), + explicit ErrorAwareWorker(Napi::FunctionReference* callback_) : + Napi::AsyncWorker(callback_), m_baton(RdKafka::ERR_NO_ERROR) {} virtual ~ErrorAwareWorker() {} virtual void Execute() = 0; - virtual void HandleOKCallback() = 0; - void HandleErrorCallback() { - Nan::HandleScope scope; + virtual void OnOK() = 0; + void OnError() { + Napi::HandleScope scope(env); // Construct error and add code to it. 
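// (Migration caveat for this base class: Napi::AsyncWorker's virtual
// hook is OnError(const Napi::Error& e), so the parameterless
// OnError() generated here hides the base method instead of
// overriding it and is never invoked by the framework. The N-API
// shape, as a sketch, is:
//
//   void OnError(const Napi::Error& e) override {
//     Napi::HandleScope scope(Env());
//     Callback().Call({e.Value()});
//   }
// )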
- v8::Local error = Nan::Error(ErrorMessage()); - Nan::Set(error.As(), Nan::New("code").ToLocalChecked(), - Nan::New(GetErrorCode())); + Napi::Value error = Napi::Error::New(env, ErrorMessage()); + (error.As()).Set(Napi::String::New(env, "code"), + Napi::New(env, GetErrorCode())); const unsigned int argc = 1; - v8::Local argv[argc] = { error }; + Napi::Value argv[argc] = { error }; callback->Call(argc, argv); } @@ -66,7 +67,7 @@ class ErrorAwareWorker : public Nan::AsyncWorker { return m_baton.err(); } - v8::Local GetErrorObject() { + Napi::Object GetErrorObject() { return m_baton.ToObject(); } @@ -75,7 +76,7 @@ class ErrorAwareWorker : public Nan::AsyncWorker { class MessageWorker : public ErrorAwareWorker { public: - explicit MessageWorker(Nan::Callback* callback_) + explicit MessageWorker(Napi::FunctionReference* callback_) : ErrorAwareWorker(callback_), m_asyncdata() { m_async = new uv_async_t; uv_async_init( @@ -158,12 +159,12 @@ class MessageWorker : public ErrorAwareWorker { uv_async_send(m_async); } - NAN_INLINE static NAUV_WORK_CB(m_async_message) { + inline static NAUV_WORK_CB(m_async_message) { MessageWorker *worker = static_cast(async->data); worker->WorkMessage(); } - NAN_INLINE static void AsyncClose_(uv_handle_t* handle) { + inline static void AsyncClose_(uv_handle_t* handle) { MessageWorker *worker = static_cast(handle->data); delete reinterpret_cast(handle); delete worker; @@ -178,14 +179,14 @@ class MessageWorker : public ErrorAwareWorker { namespace Handle { class OffsetsForTimes : public ErrorAwareWorker { public: - OffsetsForTimes(Nan::Callback*, NodeKafka::Connection*, + OffsetsForTimes(Napi::FunctionReference*, NodeKafka::Connection*, std::vector &, const int &); ~OffsetsForTimes(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Connection * m_handle; @@ -196,13 +197,13 @@ class OffsetsForTimes : public ErrorAwareWorker { class ConnectionMetadata : public ErrorAwareWorker { public: - ConnectionMetadata(Nan::Callback*, NodeKafka::Connection*, + ConnectionMetadata(Napi::FunctionReference*, NodeKafka::Connection*, std::string, int, bool); ~ConnectionMetadata(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Connection * m_connection; @@ -215,13 +216,13 @@ class ConnectionMetadata : public ErrorAwareWorker { class ConnectionQueryWatermarkOffsets : public ErrorAwareWorker { public: - ConnectionQueryWatermarkOffsets(Nan::Callback*, NodeKafka::Connection*, + ConnectionQueryWatermarkOffsets(Napi::FunctionReference*, NodeKafka::Connection*, std::string, int32_t, int); ~ConnectionQueryWatermarkOffsets(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Connection * m_connection; @@ -235,12 +236,12 @@ class ConnectionQueryWatermarkOffsets : public ErrorAwareWorker { class ProducerConnect : public ErrorAwareWorker { public: - ProducerConnect(Nan::Callback*, NodeKafka::Producer*); + ProducerConnect(Napi::FunctionReference*, NodeKafka::Producer*); ~ProducerConnect(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -248,12 +249,12 @@ class ProducerConnect : public ErrorAwareWorker { class ProducerDisconnect : public ErrorAwareWorker { public: - ProducerDisconnect(Nan::Callback*, NodeKafka::Producer*); + ProducerDisconnect(Napi::FunctionReference*, 
NodeKafka::Producer*); ~ProducerDisconnect(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -261,11 +262,11 @@ class ProducerDisconnect : public ErrorAwareWorker { class ProducerFlush : public ErrorAwareWorker { public: - ProducerFlush(Nan::Callback*, NodeKafka::Producer*, int); + ProducerFlush(Napi::FunctionReference*, NodeKafka::Producer*, int); ~ProducerFlush(); void Execute(); - void HandleOKCallback(); + void OnOK(); private: NodeKafka::Producer * producer; @@ -274,12 +275,12 @@ class ProducerFlush : public ErrorAwareWorker { class ProducerInitTransactions : public ErrorAwareWorker { public: - ProducerInitTransactions(Nan::Callback*, NodeKafka::Producer*, const int &); + ProducerInitTransactions(Napi::FunctionReference*, NodeKafka::Producer*, const int &); ~ProducerInitTransactions(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -288,12 +289,12 @@ class ProducerInitTransactions : public ErrorAwareWorker { class ProducerBeginTransaction : public ErrorAwareWorker { public: - ProducerBeginTransaction(Nan::Callback*, NodeKafka::Producer*); + ProducerBeginTransaction(Napi::FunctionReference*, NodeKafka::Producer*); ~ProducerBeginTransaction(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -301,12 +302,12 @@ class ProducerBeginTransaction : public ErrorAwareWorker { class ProducerCommitTransaction : public ErrorAwareWorker { public: - ProducerCommitTransaction(Nan::Callback*, NodeKafka::Producer*, const int &); + ProducerCommitTransaction(Napi::FunctionReference*, NodeKafka::Producer*, const int &); ~ProducerCommitTransaction(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -315,12 +316,12 @@ class ProducerCommitTransaction : public ErrorAwareWorker { class ProducerAbortTransaction : public ErrorAwareWorker { public: - ProducerAbortTransaction(Nan::Callback*, NodeKafka::Producer*, const int &); + ProducerAbortTransaction(Napi::FunctionReference*, NodeKafka::Producer*, const int &); ~ProducerAbortTransaction(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -330,15 +331,15 @@ class ProducerAbortTransaction : public ErrorAwareWorker { class ProducerSendOffsetsToTransaction : public ErrorAwareWorker { public: ProducerSendOffsetsToTransaction( - Nan::Callback*, NodeKafka::Producer*, + Napi::FunctionReference*, NodeKafka::Producer*, std::vector &, KafkaConsumer*, const int &); ~ProducerSendOffsetsToTransaction(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::Producer * producer; @@ -349,12 +350,12 @@ class ProducerSendOffsetsToTransaction : public ErrorAwareWorker { class KafkaConsumerConnect : public ErrorAwareWorker { public: - KafkaConsumerConnect(Nan::Callback*, NodeKafka::KafkaConsumer*); + KafkaConsumerConnect(Napi::FunctionReference*, NodeKafka::KafkaConsumer*); ~KafkaConsumerConnect(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * consumer; @@ -362,12 +363,12 @@ class KafkaConsumerConnect : public 
ErrorAwareWorker { class KafkaConsumerDisconnect : public ErrorAwareWorker { public: - KafkaConsumerDisconnect(Nan::Callback*, NodeKafka::KafkaConsumer*); + KafkaConsumerDisconnect(Napi::FunctionReference*, NodeKafka::KafkaConsumer*); ~KafkaConsumerDisconnect(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * consumer; @@ -375,15 +376,15 @@ class KafkaConsumerDisconnect : public ErrorAwareWorker { class KafkaConsumerConsumeLoop : public MessageWorker { public: - KafkaConsumerConsumeLoop(Nan::Callback*, + KafkaConsumerConsumeLoop(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, const int &, const int &); ~KafkaConsumerConsumeLoop(); static void ConsumeLoop(void *arg); void Close(); void Execute(const ExecutionMessageBus&); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); void HandleMessageCallback(RdKafka::Message*, RdKafka::ErrorCode); private: uv_thread_t thread_event_loop; @@ -396,12 +397,12 @@ class KafkaConsumerConsumeLoop : public MessageWorker { class KafkaConsumerConsume : public ErrorAwareWorker { public: - KafkaConsumerConsume(Nan::Callback*, NodeKafka::KafkaConsumer*, const int &); + KafkaConsumerConsume(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, const int &); ~KafkaConsumerConsume(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * consumer; const int m_timeout_ms; @@ -410,14 +411,14 @@ class KafkaConsumerConsume : public ErrorAwareWorker { class KafkaConsumerCommitted : public ErrorAwareWorker { public: - KafkaConsumerCommitted(Nan::Callback*, + KafkaConsumerCommitted(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, std::vector &, const int &); ~KafkaConsumerCommitted(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * m_consumer; std::vector m_topic_partitions; @@ -426,14 +427,14 @@ class KafkaConsumerCommitted : public ErrorAwareWorker { class KafkaConsumerCommitCb : public ErrorAwareWorker { public: - KafkaConsumerCommitCb(Nan::Callback*, + KafkaConsumerCommitCb(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, std::optional> &); ~KafkaConsumerCommitCb(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * m_consumer; std::optional> m_topic_partitions; @@ -441,13 +442,13 @@ class KafkaConsumerCommitCb : public ErrorAwareWorker { class KafkaConsumerSeek : public ErrorAwareWorker { public: - KafkaConsumerSeek(Nan::Callback*, NodeKafka::KafkaConsumer*, + KafkaConsumerSeek(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, const RdKafka::TopicPartition *, const int &); ~KafkaConsumerSeek(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::KafkaConsumer * m_consumer; const RdKafka::TopicPartition * m_toppar; @@ -456,13 +457,13 @@ class KafkaConsumerSeek : public ErrorAwareWorker { class KafkaConsumerConsumeNum : public ErrorAwareWorker { public: - KafkaConsumerConsumeNum(Nan::Callback*, NodeKafka::KafkaConsumer*, + KafkaConsumerConsumeNum(Napi::FunctionReference*, NodeKafka::KafkaConsumer*, const uint32_t &, const int &, bool); ~KafkaConsumerConsumeNum(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: 
NodeKafka::KafkaConsumer * m_consumer; const uint32_t m_num_messages; @@ -476,13 +477,13 @@ class KafkaConsumerConsumeNum : public ErrorAwareWorker { */ class AdminClientCreateTopic : public ErrorAwareWorker { public: - AdminClientCreateTopic(Nan::Callback*, NodeKafka::AdminClient*, + AdminClientCreateTopic(Napi::FunctionReference*, NodeKafka::AdminClient*, rd_kafka_NewTopic_t*, const int &); ~AdminClientCreateTopic(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient * m_client; rd_kafka_NewTopic_t* m_topic; @@ -494,13 +495,13 @@ class AdminClientCreateTopic : public ErrorAwareWorker { */ class AdminClientDeleteTopic : public ErrorAwareWorker { public: - AdminClientDeleteTopic(Nan::Callback*, NodeKafka::AdminClient*, + AdminClientDeleteTopic(Napi::FunctionReference*, NodeKafka::AdminClient*, rd_kafka_DeleteTopic_t*, const int &); ~AdminClientDeleteTopic(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient * m_client; rd_kafka_DeleteTopic_t* m_topic; @@ -512,13 +513,13 @@ class AdminClientDeleteTopic : public ErrorAwareWorker { */ class AdminClientCreatePartitions : public ErrorAwareWorker { public: - AdminClientCreatePartitions(Nan::Callback*, NodeKafka::AdminClient*, + AdminClientCreatePartitions(Napi::FunctionReference*, NodeKafka::AdminClient*, rd_kafka_NewPartitions_t*, const int &); ~AdminClientCreatePartitions(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient * m_client; rd_kafka_NewPartitions_t* m_partitions; @@ -530,14 +531,14 @@ class AdminClientCreatePartitions : public ErrorAwareWorker { */ class AdminClientListGroups : public ErrorAwareWorker { public: - AdminClientListGroups(Nan::Callback *, NodeKafka::AdminClient *, bool, + AdminClientListGroups(Napi::FunctionReference *, NodeKafka::AdminClient *, bool, std::vector &, const int &); ~AdminClientListGroups(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient *m_client; @@ -552,13 +553,13 @@ class AdminClientListGroups : public ErrorAwareWorker { */ class AdminClientDescribeGroups : public ErrorAwareWorker { public: - AdminClientDescribeGroups(Nan::Callback *, NodeKafka::AdminClient *, + AdminClientDescribeGroups(Napi::FunctionReference *, NodeKafka::AdminClient *, std::vector &, bool, const int &); ~AdminClientDescribeGroups(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient *m_client; @@ -573,13 +574,13 @@ class AdminClientDescribeGroups : public ErrorAwareWorker { */ class AdminClientDeleteGroups : public ErrorAwareWorker { public: - AdminClientDeleteGroups(Nan::Callback *, NodeKafka::AdminClient *, + AdminClientDeleteGroups(Napi::FunctionReference *, NodeKafka::AdminClient *, rd_kafka_DeleteGroup_t **, size_t, const int &); ~AdminClientDeleteGroups(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient *m_client; @@ -594,14 +595,14 @@ class AdminClientDeleteGroups : public ErrorAwareWorker { */ class AdminClientListConsumerGroupOffsets : public ErrorAwareWorker { public: - AdminClientListConsumerGroupOffsets(Nan::Callback *, NodeKafka::AdminClient *, + 
AdminClientListConsumerGroupOffsets(Napi::FunctionReference *, NodeKafka::AdminClient *, rd_kafka_ListConsumerGroupOffsets_t **, size_t, bool, const int &); ~AdminClientListConsumerGroupOffsets(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient *m_client; @@ -617,14 +618,14 @@ class AdminClientListConsumerGroupOffsets : public ErrorAwareWorker { */ class AdminClientDeleteRecords : public ErrorAwareWorker { public: - AdminClientDeleteRecords(Nan::Callback *, NodeKafka::AdminClient *, + AdminClientDeleteRecords(Napi::FunctionReference *, NodeKafka::AdminClient *, rd_kafka_DeleteRecords_t **, size_t, const int &, const int &); ~AdminClientDeleteRecords(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient *m_client; @@ -640,14 +641,14 @@ class AdminClientDeleteRecords : public ErrorAwareWorker { */ class AdminClientDescribeTopics : public ErrorAwareWorker { public: - AdminClientDescribeTopics(Nan::Callback *, NodeKafka::AdminClient *, + AdminClientDescribeTopics(Napi::FunctionReference *, NodeKafka::AdminClient *, rd_kafka_TopicCollection_t *, const bool, const int &); ~AdminClientDescribeTopics(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient *m_client; @@ -662,14 +663,14 @@ class AdminClientDescribeTopics : public ErrorAwareWorker { */ class AdminClientListOffsets : public ErrorAwareWorker { public: - AdminClientListOffsets(Nan::Callback *, NodeKafka::AdminClient *, + AdminClientListOffsets(Napi::FunctionReference *, NodeKafka::AdminClient *, rd_kafka_topic_partition_list_t *, const int &, rd_kafka_IsolationLevel_t); ~AdminClientListOffsets(); void Execute(); - void HandleOKCallback(); - void HandleErrorCallback(); + void OnOK(); + void OnError(); private: NodeKafka::AdminClient *m_client; From 18bc36dd316ed0d5507506938d36ca423c40e4bd Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Mon, 24 Mar 2025 20:22:20 -0500 Subject: [PATCH 02/14] "fix" header files --- src/callbacks.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/callbacks.h b/src/callbacks.h index 6dae54f5..b8f46a18 100644 --- a/src/callbacks.h +++ b/src/callbacks.h @@ -45,7 +45,7 @@ class Dispatcher { uv_mutex_t async_lock; private: - inline static NAUV_WORK_CB(AsyncMessage_) { + inline static void func(uv_async_t *async) { Dispatcher *dispatcher = static_cast(async->data); dispatcher->Flush(); @@ -224,8 +224,8 @@ class Rebalance : public RdKafka::RebalanceCb { std::vector &); RebalanceDispatcher dispatcher; - private: - v8::Persistent m_cb; + // private: + // v8::Persistent m_cb; }; class OffsetCommitDispatcher : public Dispatcher { @@ -243,8 +243,8 @@ class OffsetCommit : public RdKafka::OffsetCommitCb { void offset_commit_cb(RdKafka::ErrorCode, std::vector &); // NOLINT OffsetCommitDispatcher dispatcher; - private: - v8::Persistent m_cb; + // private: + // v8::Persistent m_cb; }; class OAuthBearerTokenRefreshDispatcher : public Dispatcher { From 957c9e53a08acf090a8cce8fc31d7e2fca4acfed Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Mon, 24 Mar 2025 22:13:35 -0500 Subject: [PATCH 03/14] upgrade bindings.cc and constants entry point --- src/binding.cc | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/src/binding.cc b/src/binding.cc index f93938f6..7d7753ad 100644 
--- a/src/binding.cc +++ b/src/binding.cc @@ -9,17 +9,17 @@ */ #include -#include #include "src/binding.h" -using NodeKafka::Producer; -using NodeKafka::KafkaConsumer; using NodeKafka::AdminClient; +using NodeKafka::KafkaConsumer; +using NodeKafka::Producer; using NodeKafka::Topic; -using RdKafka::ErrorCode; +using Napi::Number; Napi::Value NodeRdKafkaErr2Str(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); int points = info[0].As().Int32Value(); // Cast to error code RdKafka::ErrorCode err = static_cast(points); @@ -29,7 +29,8 @@ Napi::Value NodeRdKafkaErr2Str(const Napi::CallbackInfo& info) { return Napi::String::New(env, errstr); } -Napi::Value NodeRdKafkaBuildInFeatures(const Napi::CallbackInfo& info) { +Napi::Value NodeRdKafkaBuildInFeatures(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); RdKafka::Conf * config = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL); std::string features; @@ -43,34 +44,37 @@ Napi::Value NodeRdKafkaBuildInFeatures(const Napi::CallbackInfo& info) { delete config; } -void ConstantsInit(Napi::Object exports) { +void defconst(Napi::Env env, Napi::Object target, const char *name, Napi::Value value) { + target.Set(Napi::String::New(env, name), value); +} + +void ConstantsInit(Napi::Env env, Napi::Object exports) { Napi::Object topicConstants = Napi::Object::New(env); // RdKafka Error Code definitions - NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::PARTITION_UA); - NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_BEGINNING); - NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_END); - NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_STORED); - NODE_DEFINE_CONSTANT(topicConstants, RdKafka::Topic::OFFSET_INVALID); + defconst(env, topicConstants, "RdKafka::Topic::PARTITION_UA", Number::New(env, RdKafka::Topic::PARTITION_UA)); + defconst(env, topicConstants, "RdKafka::Topic::OFFSET_BEGINNING", Number::New(env, RdKafka::Topic::OFFSET_BEGINNING)); + defconst(env, topicConstants, "RdKafka::Topic::OFFSET_END", Number::New(env, RdKafka::Topic::OFFSET_END)); + defconst(env, topicConstants, "RdKafka::Topic::OFFSET_STORED", Number::New(env, RdKafka::Topic::OFFSET_STORED)); + defconst(env, topicConstants, "RdKafka::Topic::OFFSET_INVALID", Number::New(env, RdKafka::Topic::OFFSET_INVALID)); (exports).Set(Napi::String::New(env, "topic"), topicConstants); - (exports).Set(Napi::String::New(env, "err2str"), - Napi::GetFunction(Napi::Function::New(env, NodeRdKafkaErr2Str))); // NOLINT + (exports).Set(Napi::String::New(env, "err2str"),Napi::Function::New(env, NodeRdKafkaErr2Str)); - (exports).Set(Napi::String::New(env, "features"), - Napi::GetFunction(Napi::Function::New(env, NodeRdKafkaBuildInFeatures))); // NOLINT + (exports).Set(Napi::String::New(env, "features"), Napi::Function::New(env, NodeRdKafkaBuildInFeatures)); } -void Init(Napi::Object exports, Napi::Value m_, void* v_) { - KafkaConsumer::Init(exports); +Napi::Object Init(Napi::Env env, Napi::Object exports) { + KafkaConsumer::Init(env, exports); Producer::Init(exports); AdminClient::Init(exports); Topic::Init(exports); - ConstantsInit(exports); + ConstantsInit(env, exports); (exports).Set(Napi::String::New(env, "librdkafkaVersion"), - Napi::New(env, RdKafka::version_str().c_str())); + Napi::String::New(env, RdKafka::version_str().c_str())); + return exports; } NODE_API_MODULE(kafka, Init) From b1f432bb5a454f5d3e7fa09d8f8b0addf75a0125 Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Tue, 25 Mar 2025 07:50:18 -0500 Subject: [PATCH 04/14] Move connection 
into template --- src/connection.cc | 686 ------------------------------------------ src/connection.h | 686 +++++++++++++++++++++++++++++++++++++++--- src/kafka-consumer.cc | 111 +++---- src/kafka-consumer.h | 56 ++-- src/producer.h | 2 +- src/topic.h | 3 +- 6 files changed, 739 insertions(+), 805 deletions(-) diff --git a/src/connection.cc b/src/connection.cc index f03b323c..e69de29b 100644 --- a/src/connection.cc +++ b/src/connection.cc @@ -1,686 +0,0 @@ -/* - * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library - * - * Copyright (c) 2016-2023 Blizzard Entertainment - * (c) 2023 Confluent, Inc. - * - * This software may be modified and distributed under the terms - * of the MIT license. See the LICENSE.txt file for details. - */ -#include "src/connection.h" - -#include -#include -#include - -#include "src/workers.h" - -using RdKafka::Conf; - -namespace NodeKafka { - -/** - * @brief Connection v8 wrapped object. - * - * Wraps the RdKafka::Handle object with compositional inheritence and - * provides sensible defaults for exposing callbacks to node - * - * This object can't itself expose methods to the prototype directly, as far - * as I can tell. But it can provide the NAN_METHODS that just need to be added - * to the prototype. Since connections, etc. are managed differently based on - * whether it is a producer or consumer, they manage that. This base class - * handles some of the wrapping functionality and more importantly, the - * configuration of callbacks - * - * Any callback available to both consumers and producers, like logging or - * events will be handled in here. - * - * @sa RdKafka::Handle - * @sa NodeKafka::Client - */ - -Connection::Connection(Conf* gconfig, Conf* tconfig): - m_event_cb(), - m_gconfig(gconfig), - m_tconfig(tconfig) { - std::string errstr; - - m_client = NULL; - m_is_closing = false; - uv_rwlock_init(&m_connection_lock); - - // Try to set the event cb. Shouldn't be an error here, but if there - // is, it doesn't get reported. - // - // Perhaps node new methods should report this as an error? But there - // isn't anything the user can do about it. - m_gconfig->set("event_cb", &m_event_cb, errstr); - } - -/* Use an existing Connection object as the underlying for this object. - * At this point, the underlying connection is assumed to be connected with - * the m_client set. */ -Connection::Connection(Connection *existing): - m_event_cb() { - m_client = existing->m_client; - - m_gconfig = existing->m_gconfig; - m_tconfig = existing->m_tconfig; - - m_is_closing = false; - m_has_underlying = true; - - // We must share the same connection lock as the existing connection to - // avoid getting disconnected while the existing connection is still in use. - m_connection_lock = existing->m_connection_lock; - } - - -Connection::~Connection() { - // The underlying connection will take care of cleanup. - if (m_has_underlying) { - return; - } - - uv_rwlock_destroy(&m_connection_lock); - if (m_tconfig) { - delete m_tconfig; - } - - if (m_gconfig) { - delete m_gconfig; - } -} - -Baton Connection::rdkafkaErrorToBaton(RdKafka::Error* error) { - if (NULL == error) { - return Baton(RdKafka::ERR_NO_ERROR); - } else { - Baton result(error->code(), error->str(), error->is_fatal(), - error->is_retriable(), error->txn_requires_abort()); - delete error; - return result; - } -} - -// If OAUTHBEARER authentication is set up, then push the callbacks onto the -// SASL queue so we don't need to keep polling. This method should be called -// before the client is created. 
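// (Per this commit's diffstat, the Connection implementations below
// move from connection.cc into connection.h so they can be templated.
// The ordering contract these two SASL/OAUTHBEARER helpers encode,
// sketched from their own bodies:
//
//   m_gconfig->enable_sasl_queue(true, errstr);    // before the client exists
//   // ... create the RdKafka handle ...
//   m_client->sasl_background_callbacks_enable();  // after creation
// )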
-Baton Connection::setupSaslOAuthBearerConfig() { - if (!m_gconfig->is_sasl_oauthbearer()) { - return Baton(RdKafka::ERR_NO_ERROR); - } - - std::string errstr; - if (m_gconfig->enable_sasl_queue(true, errstr) != RdKafka::Conf::CONF_OK) { - return Baton(RdKafka::ERR__STATE, errstr); - } - - return Baton(RdKafka::ERR_NO_ERROR); -} - -// If OAUTHBEARER authentication is set up, then handle the callbacks on -// the background thread. This method should be called after the client is -// created and only if `setupSaslOAuthBearerConfig` is called earlier. -Baton Connection::setupSaslOAuthBearerBackgroundQueue() { - if (!m_gconfig->is_sasl_oauthbearer()) { - return Baton(RdKafka::ERR_NO_ERROR); - } - - RdKafka::Error* error = m_client->sasl_background_callbacks_enable(); - return rdkafkaErrorToBaton(error); -} - -RdKafka::TopicPartition* Connection::GetPartition(std::string &topic) { - return RdKafka::TopicPartition::create(topic, RdKafka::Topic::PARTITION_UA); -} - -RdKafka::TopicPartition* Connection::GetPartition(std::string &topic, int partition) { // NOLINT - return RdKafka::TopicPartition::create(topic, partition); -} - -bool Connection::IsConnected() const { - return !m_is_closing && m_client != NULL; -} - -bool Connection::IsClosing() const { - return m_client != NULL && m_is_closing; -} - -RdKafka::Handle* Connection::GetClient() { - return m_client; -} - -std::string Connection::Name() const { - if (!IsConnected()) { - return std::string(""); - } - return std::string(m_client->name()); -} - -Baton Connection::CreateTopic(std::string topic_name) { - return CreateTopic(topic_name, NULL); -} - -Baton Connection::CreateTopic(std::string topic_name, RdKafka::Conf* conf) { - std::string errstr; - - RdKafka::Topic* topic = NULL; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - topic = RdKafka::Topic::create(m_client, topic_name, conf, errstr); - } else { - return Baton(RdKafka::ErrorCode::ERR__STATE); - } - } else { - return Baton(RdKafka::ErrorCode::ERR__STATE); - } - - if (!errstr.empty()) { - return Baton(RdKafka::ErrorCode::ERR_TOPIC_EXCEPTION, errstr); - } - - // Maybe do it this way later? Then we don't need to do static_cast - // - return Baton(topic); -} - -Baton Connection::QueryWatermarkOffsets( - std::string topic_name, int32_t partition, - int64_t* low_offset, int64_t* high_offset, - int timeout_ms) { - // Check if we are connected first - - RdKafka::ErrorCode err; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - // Always send true - we - err = m_client->query_watermark_offsets(topic_name, partition, - low_offset, high_offset, timeout_ms); - - } else { - err = RdKafka::ERR__STATE; - } - } else { - err = RdKafka::ERR__STATE; - } - - return Baton(err); -} - -/** - * Look up the offsets for the given partitions by timestamp. - * - * The returned offset for each partition is the earliest offset whose - * timestamp is greater than or equal to the given timestamp in the - * corresponding partition. - * - * @returns A baton specifying the error state. If there was no error, - * there still may be an error on a topic partition basis. 
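 * (Callers therefore inspect each partition once the call returns,
 * sketched:
 *
 *   for (RdKafka::TopicPartition* tp : toppars) {
 *     if (tp->err() == RdKafka::ERR_NO_ERROR) {
 *       // tp->offset() holds the earliest offset whose timestamp is
 *       // >= the requested timestamp
 *     }
 *   }
 * )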
- */ -Baton Connection::OffsetsForTimes( - std::vector &toppars, - int timeout_ms) { - // Check if we are connected first - - RdKafka::ErrorCode err; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - // Always send true - we - err = m_client->offsetsForTimes(toppars, timeout_ms); - - } else { - err = RdKafka::ERR__STATE; - } - } else { - err = RdKafka::ERR__STATE; - } - - return Baton(err); -} - -Baton Connection::GetMetadata( - bool all_topics, std::string topic_name, int timeout_ms) { - RdKafka::Topic* topic = NULL; - RdKafka::ErrorCode err; - - std::string errstr; - - if (!topic_name.empty()) { - Baton b = CreateTopic(topic_name); - if (b.err() == RdKafka::ErrorCode::ERR_NO_ERROR) { - topic = b.data(); - } - } - - RdKafka::Metadata* metadata = NULL; - - if (!errstr.empty()) { - return Baton(RdKafka::ERR_TOPIC_EXCEPTION); - } - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - // Always send true - we - err = m_client->metadata(all_topics, topic, &metadata, timeout_ms); - } else { - err = RdKafka::ERR__STATE; - } - } else { - err = RdKafka::ERR__STATE; - } - - if (topic != NULL) - delete topic; - - if (err == RdKafka::ERR_NO_ERROR) { - return Baton(metadata); - } else { - // metadata is not set here - // @see https://github.com/confluentinc/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L860 // NOLINT - return Baton(err); - } -} - -Baton Connection::SetSaslCredentials( - std::string username, std::string password) { - RdKafka::Error *error; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - // Always send true - we - error = m_client->sasl_set_credentials(username, password); - } else { - return Baton(RdKafka::ERR__STATE); - } - } else { - return Baton(RdKafka::ERR__STATE); - } - - return rdkafkaErrorToBaton(error); -} - -Baton Connection::SetOAuthBearerToken( - const std::string& value, int64_t lifetime_ms, - const std::string& principal_name, - const std::list& extensions) { - RdKafka::ErrorCode error_code; - std::string errstr; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - error_code = m_client->oauthbearer_set_token( - value, lifetime_ms, principal_name, extensions, errstr); - } else { - return Baton(RdKafka::ERR__STATE); - } - } else { - return Baton(RdKafka::ERR__STATE); - } - - if (error_code != RdKafka::ERR_NO_ERROR) { - return Baton(error_code, errstr); - } - - return Baton(error_code); -} - -Baton Connection::SetOAuthBearerTokenFailure(const std::string& errstr) { - RdKafka::ErrorCode error_code; - - if (IsConnected()) { - scoped_shared_read_lock lock(m_connection_lock); - if (IsConnected()) { - error_code = m_client->oauthbearer_set_token_failure(errstr); - } else { - return Baton(RdKafka::ERR__STATE); - } - } else { - return Baton(RdKafka::ERR__STATE); - } - - return Baton(error_code); -} - -void Connection::ConfigureCallback( - const std::string &string_key, const Napi::Function &cb, bool add) { - if (string_key.compare("event_cb") == 0) { - if (add) { - this->m_event_cb.dispatcher.AddCallback(cb); - } else { - this->m_event_cb.dispatcher.RemoveCallback(cb); - } - } -} - -// NAN METHODS - -Napi::Value Connection::NodeGetMetadata(const Napi::CallbackInfo& info) { - Napi::HandleScope scope(env); - - Connection* obj = ObjectWrap::Unwrap(info.This()); - - Napi::Object config; - if (info[0].IsObject()) { - config = info[0].As(); - } else { - config = Napi::Object::New(env); - } - - 
if (!info[1].IsFunction()) { - Napi::Error::New(env, "Second parameter must be a callback").ThrowAsJavaScriptException(); - return env.Null(); - } - - Napi::Function cb = info[1].As(); - - std::string topic = GetParameter(config, "topic", ""); - bool allTopics = GetParameter(config, "allTopics", true); - int timeout_ms = GetParameter(config, "timeout", 30000); - - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - - Napi::AsyncQueueWorker(new Workers::ConnectionMetadata( - callback, obj, topic, timeout_ms, allTopics)); - - return env.Null(); -} - -Napi::Value Connection::NodeOffsetsForTimes(const Napi::CallbackInfo& info) { - Napi::HandleScope scope(env); - - if (info.Length() < 3 || !info[0].IsArray()) { - // Just throw an exception - Napi::Error::New(env, "Need to specify an array of topic partitions").ThrowAsJavaScriptException(); - return env.Null(); - } - - std::vector toppars = - Conversion::TopicPartition::FromV8Array(info[0].As()); - - int timeout_ms; - Napi::Maybe maybeTimeout = - info[1].As(.As().Uint32Value()); - - if (maybeTimeout.IsNothing()) { - timeout_ms = 1000; - } else { - timeout_ms = static_cast(maybeTimeout); - } - - Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - - Connection* handle = ObjectWrap::Unwrap(info.This()); - - Napi::AsyncQueueWorker( - new Workers::Handle::OffsetsForTimes(callback, handle, - toppars, timeout_ms)); - - return env.Null(); -} - -Napi::Value Connection::NodeQueryWatermarkOffsets(const Napi::CallbackInfo& info) { - Napi::HandleScope scope(env); - - Connection* obj = ObjectWrap::Unwrap(info.This()); - - if (!info[0].IsString()) { - Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException(); -; - return; - } - - if (!info[1].IsNumber()) { - Napi::Error::New(env, "2nd parameter must be a partition number").ThrowAsJavaScriptException(); - return env.Null(); - } - - if (!info[2].IsNumber()) { - Napi::Error::New(env, "3rd parameter must be a number of milliseconds").ThrowAsJavaScriptException(); - return env.Null(); - } - - if (!info[3].IsFunction()) { - Napi::Error::New(env, "4th parameter must be a callback").ThrowAsJavaScriptException(); - return env.Null(); - } - - // Get string pointer for the topic name - std::string topicUTF8 = info[0].As(.To()); - // The first parameter is the topic - std::string topic_name(*topicUTF8); - - // Second parameter is the partition - int32_t partition = info[1].As().Int32Value(); - - // Third parameter is the timeout - int timeout_ms = info[2].As().Int32Value(); - - // Fourth parameter is the callback - Napi::Function cb = info[3].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - - Napi::AsyncQueueWorker(new Workers::ConnectionQueryWatermarkOffsets( - callback, obj, topic_name, partition, timeout_ms)); - - return env.Null(); -} - -Napi::Value Connection::NodeSetSaslCredentials(const Napi::CallbackInfo& info) { - if (!info[0].IsString()) { - Napi::Error::New(env, "1st parameter must be a username string").ThrowAsJavaScriptException(); - return env.Null(); - } - - if (!info[1].IsString()) { - Napi::Error::New(env, "2nd parameter must be a password string").ThrowAsJavaScriptException(); - return env.Null(); - } - - // Get string pointer for the username - std::string usernameUTF8 = info[0].As(.To()); - // The first parameter is the username - std::string username(*usernameUTF8); - - // Get string pointer for the password - std::string passwordUTF8 = info[1].As(.To()); - // The 
first parameter is the password - std::string password(*passwordUTF8); - - Connection* obj = ObjectWrap::Unwrap(info.This()); - Baton b = obj->SetSaslCredentials(username, password); - - if (b.err() != RdKafka::ERR_NO_ERROR) { - Napi::Value errorObject = b.ToObject(); - Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); - return env.Null(); - } - - return env.Null(); -} - - -// Node methods -Napi::Value Connection::NodeConfigureCallbacks(const Napi::CallbackInfo& info) { - Napi::HandleScope scope(env); - - if (info.Length() < 2 || - !info[0].IsBoolean() || - !info[1].IsObject()) { - // Just throw an exception - Napi::Error::New(env, "Need to specify a callbacks object").ThrowAsJavaScriptException(); - return env.Null(); - } - v8::Local context = Napi::GetCurrentContext(); - Connection* obj = ObjectWrap::Unwrap(info.This()); - - const bool add = info[0].As().Value().ToChecked(); - Napi::Object configs_object = - info[1].ToObject(context); - Napi::Array configs_property_names = - configs_object->GetOwnPropertyNames(context); - - for (unsigned int j = 0; j < configs_property_names->Length(); ++j) { - std::string configs_string_key; - - Napi::Value configs_key = - (configs_property_names).Get(j); - Napi::Value configs_value = - (configs_object).Get(configs_key); - - int config_type = 0; - if (configs_value.IsObject() && configs_key.IsString()) { - std::string configs_utf8_key = configs_key.As(); - configs_string_key = std::string(*configs_utf8_key); - if (configs_string_key.compare("global") == 0) { - config_type = 1; - } else if (configs_string_key.compare("topic") == 0) { - config_type = 2; - } else if (configs_string_key.compare("event") == 0) { - config_type = 3; - } else { - continue; - } - } else { - continue; - } - - Napi::Object object = - configs_value->ToObject(context); - Napi::Array property_names = - object->GetOwnPropertyNames(context); - - for (unsigned int i = 0; i < property_names->Length(); ++i) { - std::string errstr; - std::string string_key; - - Napi::Value key = (property_names).Get(i); - Napi::Value value = (object).Get(key); - - if (key.IsString()) { - std::string utf8_key = key.As(); - string_key = std::string(*utf8_key); - } else { - continue; - } - - if (value->IsFunction()) { - Napi::Function cb = value.As(); - switch (config_type) { - case 1: - obj->m_gconfig->ConfigureCallback(string_key, cb, add, errstr); - if (!errstr.empty()) { - Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); - return env.Null(); - } - break; - case 2: - obj->m_tconfig->ConfigureCallback(string_key, cb, add, errstr); - if (!errstr.empty()) { - Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); - return env.Null(); - } - break; - case 3: - obj->ConfigureCallback(string_key, cb, add); - break; - } - } - } - } - - return env.True(); -} - -Napi::Value Connection::NodeSetOAuthBearerToken(const Napi::CallbackInfo& info) { - if (!info[0].IsString()) { - Napi::Error::New(env, "1st parameter must be a token string").ThrowAsJavaScriptException(); - return env.Null(); - } - - if (!info[1].IsNumber()) { - Napi::Error::New(env, "2nd parameter must be a lifetime_ms number").ThrowAsJavaScriptException(); - return env.Null(); - } - - if (!info[2].IsString()) { - Napi::Error::New(env, "3rd parameter must be a principal_name string").ThrowAsJavaScriptException(); - return env.Null(); - } - - if (!info[3].IsNullOrUndefined() && !info[3].IsArray()) { - Napi::Error::New(env, "4th parameter must be an extensions array or null").ThrowAsJavaScriptException(); - 
return env.Null(); - } - - // Get string pointer for the token - std::string tokenUtf8 = info[0].As(.To()); - std::string token(*tokenUtf8); - - // Get the lifetime_ms - int64_t lifetime_ms = info[1].As().Int64Value(); - - // Get string pointer for the principal_name - std::string principal_nameUtf8 = - info[2].As(.To()); - std::string principal_name(*principal_nameUtf8); - - // Get the extensions (if any) - std::list extensions; - if (!info[3].IsNullOrUndefined()) { - Napi::Array extensionsArray = info[3].As(); - extensions = v8ArrayToStringList(extensionsArray); - } - - Connection* obj = ObjectWrap::Unwrap(info.This()); - Baton b = - obj->SetOAuthBearerToken(token, lifetime_ms, principal_name, extensions); - - if (b.err() != RdKafka::ERR_NO_ERROR) { - Napi::Value errorObject = b.ToObject(); - Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); - return env.Null(); - } - - return env.Null(); -} - -Napi::Value Connection::NodeSetOAuthBearerTokenFailure(const Napi::CallbackInfo& info) { - if (!info[0].IsString()) { - Napi::Error::New(env, "1st parameter must be an error string").ThrowAsJavaScriptException(); - return env.Null(); - } - - // Get string pointer for the error string - std::string errstrUtf8 = info[0].As(.To()); - std::string errstr(*errstrUtf8); - - Connection* obj = ObjectWrap::Unwrap(info.This()); - Baton b = obj->SetOAuthBearerTokenFailure(errstr); - - if (b.err() != RdKafka::ERR_NO_ERROR) { - Napi::Value errorObject = b.ToObject(); - Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); - return env.Null(); - } - - return env.Null(); -} - -Napi::Value Connection::NodeName(const Napi::CallbackInfo& info) { - Connection* obj = ObjectWrap::Unwrap(info.This()); - std::string name = obj->Name(); - return Napi::New(env, name); -} - -} // namespace NodeKafka diff --git a/src/connection.h b/src/connection.h index 63442ee4..4521d695 100644 --- a/src/connection.h +++ b/src/connection.h @@ -47,26 +47,216 @@ namespace NodeKafka { * @sa NodeKafka::Client */ -class Connection : public Napi::ObjectWrap { +template class Connection : public Napi::ObjectWrap { public: - bool IsConnected() const; - bool IsClosing() const; + bool IsConnected() const { + return !m_is_closing && m_client != NULL; + } + bool IsClosing() const { + return m_client != NULL && m_is_closing; + } + // Baton - Baton CreateTopic(std::string); - Baton CreateTopic(std::string, RdKafka::Conf*); - Baton GetMetadata(bool, std::string, int); - Baton QueryWatermarkOffsets(std::string, int32_t, int64_t*, int64_t*, int); - Baton OffsetsForTimes(std::vector &, int); - Baton SetSaslCredentials(std::string, std::string); - Baton SetOAuthBearerToken(const std::string&, int64_t, const std::string&, - const std::list&); - Baton SetOAuthBearerTokenFailure(const std::string&); + Baton CreateTopic(std::string topic_name, RdKafka::Conf* conf = NULL) { + std::string errstr; - RdKafka::Handle* GetClient(); + RdKafka::Topic* topic = NULL; - static RdKafka::TopicPartition* GetPartition(std::string &); - static RdKafka::TopicPartition* GetPartition(std::string &, int); + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + topic = RdKafka::Topic::create(m_client, topic_name, conf, errstr); + } else { + return Baton(RdKafka::ErrorCode::ERR__STATE); + } + } else { + return Baton(RdKafka::ErrorCode::ERR__STATE); + } + + if (!errstr.empty()) { + return Baton(RdKafka::ErrorCode::ERR_TOPIC_EXCEPTION, errstr); + } + + // Maybe do it this way later? 
Then we don't need to do static_cast + // + return Baton(topic); + } + + Baton GetMetadata(bool all_topics, std::string topic_name, int timeout_ms) { + RdKafka::Topic* topic = NULL; + RdKafka::ErrorCode err; + + std::string errstr; + + if (!topic_name.empty()) { + Baton b = CreateTopic(topic_name); + if (b.err() == RdKafka::ErrorCode::ERR_NO_ERROR) { + topic = b.data(); + } + } + + RdKafka::Metadata* metadata = NULL; + + if (!errstr.empty()) { + return Baton(RdKafka::ERR_TOPIC_EXCEPTION); + } + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + // Always send true - we + err = m_client->metadata(all_topics, topic, &metadata, timeout_ms); + } else { + err = RdKafka::ERR__STATE; + } + } else { + err = RdKafka::ERR__STATE; + } + + if (topic != NULL) + delete topic; + + if (err == RdKafka::ERR_NO_ERROR) { + return Baton(metadata); + } else { + // metadata is not set here + // @see https://github.com/confluentinc/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L860 // NOLINT + return Baton(err); + } + } + + Baton QueryWatermarkOffsets( + std::string topic_name, int32_t partition, + int64_t* low_offset, int64_t* high_offset, + int timeout_ms) { + // Check if we are connected first + + RdKafka::ErrorCode err; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + // Always send true - we + err = m_client->query_watermark_offsets(topic_name, partition, + low_offset, high_offset, timeout_ms); + + } else { + err = RdKafka::ERR__STATE; + } + } else { + err = RdKafka::ERR__STATE; + } + + return Baton(err); + } + + /** + * Look up the offsets for the given partitions by timestamp. + * + * The returned offset for each partition is the earliest offset whose + * timestamp is greater than or equal to the given timestamp in the + * corresponding partition. + * + * @returns A baton specifying the error state. If there was no error, + * there still may be an error on a topic partition basis. 
+ */ + Baton OffsetsForTimes( + std::vector &toppars, + int timeout_ms) { + // Check if we are connected first + + RdKafka::ErrorCode err; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + // Always send true - we + err = m_client->offsetsForTimes(toppars, timeout_ms); + + } else { + err = RdKafka::ERR__STATE; + } + } else { + err = RdKafka::ERR__STATE; + } + + return Baton(err); + } + + Baton SetSaslCredentials( + std::string username, std::string password) { + RdKafka::Error *error; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + // Always send true - we + error = m_client->sasl_set_credentials(username, password); + } else { + return Baton(RdKafka::ERR__STATE); + } + } else { + return Baton(RdKafka::ERR__STATE); + } + + return rdkafkaErrorToBaton(error); + } + + Baton SetOAuthBearerToken( + const std::string& value, int64_t lifetime_ms, + const std::string& principal_name, + const std::list& extensions) { + RdKafka::ErrorCode error_code; + std::string errstr; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + error_code = m_client->oauthbearer_set_token( + value, lifetime_ms, principal_name, extensions, errstr); + } else { + return Baton(RdKafka::ERR__STATE); + } + } else { + return Baton(RdKafka::ERR__STATE); + } + + if (error_code != RdKafka::ERR_NO_ERROR) { + return Baton(error_code, errstr); + } + + return Baton(error_code); + } + + Baton SetOAuthBearerTokenFailure(const std::string& errstr) { + RdKafka::ErrorCode error_code; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + error_code = m_client->oauthbearer_set_token_failure(errstr); + } else { + return Baton(RdKafka::ERR__STATE); + } + } else { + return Baton(RdKafka::ERR__STATE); + } + + return Baton(error_code); + } + + RdKafka::Handle* GetClient() { + return m_client; + } + + static RdKafka::TopicPartition* GetPartition(std::string &topic) { + return RdKafka::TopicPartition::create(topic, RdKafka::Topic::PARTITION_UA); + } + + static RdKafka::TopicPartition* GetPartition(std::string &topic, int partition) { // NOLINT + return RdKafka::TopicPartition::create(topic, partition); + } Callbacks::Event m_event_cb; @@ -74,21 +264,126 @@ class Connection : public Napi::ObjectWrap { virtual void DeactivateDispatchers() = 0; virtual void ConfigureCallback( - const std::string &string_key, const Napi::Function &cb, bool add); + const std::string &string_key, const Napi::Function &cb, bool add) { + if (string_key.compare("event_cb") == 0) { + if (add) { + this->m_event_cb.dispatcher.AddCallback(cb); + } else { + this->m_event_cb.dispatcher.RemoveCallback(cb); + } + } + } + + std::string Name() const { + if (!IsConnected()) { + return std::string(""); + } + return std::string(m_client->name()); + } + + +protected: + Connection(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + if (!info.IsConstructCall()) { + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + } + + if (info.Length() < 2) { + Napi::Error::New(env, "You must supply global and topic configuration").ThrowAsJavaScriptException(); - std::string Name() const; + } + } + Connection(Conf* gconfig, Conf* tconfig): + m_event_cb(), + m_gconfig(gconfig), + m_tconfig(tconfig) { + std::string errstr; - protected: - Connection(Conf*, Conf*); - explicit Connection(Connection *); - ~Connection(); + m_client = NULL; + m_is_closing 
= false; + uv_rwlock_init(&m_connection_lock); + + // Try to set the event cb. Shouldn't be an error here, but if there + // is, it doesn't get reported. + // + // Perhaps node new methods should report this as an error? But there + // isn't anything the user can do about it. + m_gconfig->set("event_cb", &m_event_cb, errstr); + } + explicit Connection(Connection *existing): + m_event_cb() { + m_client = existing->m_client; + + m_gconfig = existing->m_gconfig; + m_tconfig = existing->m_tconfig; + + m_is_closing = false; + m_has_underlying = true; + + // We must share the same connection lock as the existing connection to + // avoid getting disconnected while the existing connection is still in use. + m_connection_lock = existing->m_connection_lock; + } + virtual ~Connection() { + // The underlying connection will take care of cleanup. + if (m_has_underlying) { + return; + } + + uv_rwlock_destroy(&m_connection_lock); + if (m_tconfig) { + delete m_tconfig; + } + + if (m_gconfig) { + delete m_gconfig; + } + } static Napi::FunctionReference constructor; - static void New(const Napi::CallbackInfo& info); - static Baton rdkafkaErrorToBaton(RdKafka::Error* error); - Baton setupSaslOAuthBearerConfig(); - Baton setupSaslOAuthBearerBackgroundQueue(); + static Baton rdkafkaErrorToBaton(RdKafka::Error* error) { + if (NULL == error) { + return Baton(RdKafka::ERR_NO_ERROR); + } else { + Baton result(error->code(), error->str(), error->is_fatal(), + error->is_retriable(), error->txn_requires_abort()); + delete error; + return result; + } + } + + // If OAUTHBEARER authentication is set up, then push the callbacks onto the + // SASL queue so we don't need to keep polling. This method should be called + // before the client is created. + Baton setupSaslOAuthBearerConfig() { + if (!m_gconfig->is_sasl_oauthbearer()) { + return Baton(RdKafka::ERR_NO_ERROR); + } + + std::string errstr; + if (m_gconfig->enable_sasl_queue(true, errstr) != RdKafka::Conf::CONF_OK) { + return Baton(RdKafka::ERR__STATE, errstr); + } + + return Baton(RdKafka::ERR_NO_ERROR); + } + + // If OAUTHBEARER authentication is set up, then handle the callbacks on + // the background thread. This method should be called after the client is + // created and only if `setupSaslOAuthBearerConfig` is called earlier. 
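+  //
+  // A minimal sketch of the intended call order, assuming a subclass
+  // Connect() that creates the librdkafka handle (RdKafka::Producer::create
+  // below is illustrative; consumers would use RdKafka::KafkaConsumer::create):
+  //
+  //   Baton b = setupSaslOAuthBearerConfig();     // before m_client exists
+  //   if (b.err() != RdKafka::ERR_NO_ERROR) return b;
+  //
+  //   std::string errstr;
+  //   m_client = RdKafka::Producer::create(m_gconfig, errstr);
+  //   if (!m_client) return Baton(RdKafka::ERR__STATE, errstr);
+  //
+  //   b = setupSaslOAuthBearerBackgroundQueue();  // after m_client is set
+  //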
+ Baton setupSaslOAuthBearerBackgroundQueue() { + if (!m_gconfig->is_sasl_oauthbearer()) { + return Baton(RdKafka::ERR_NO_ERROR); + } + + RdKafka::Error* error = m_client->sasl_background_callbacks_enable(); + return rdkafkaErrorToBaton(error); + } + + // Baton setupSaslOAuthBearerConfig(); + // Baton setupSaslOAuthBearerBackgroundQueue(); bool m_is_closing; @@ -99,16 +394,339 @@ class Connection : public Napi::ObjectWrap { uv_rwlock_t m_connection_lock; bool m_has_underlying = false; - RdKafka::Handle* m_client; + RdKafka::Handle *m_client; + + // NAPI Methods + + + Napi::Value NodeGetMetadata(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + + Connection* obj = this; + + Napi::Object config; + if (info[0].IsObject()) { + config = info[0].As(); + } else { + config = Napi::Object::New(env); + } + + if (!info[1].IsFunction()) { + Napi::Error::New(env, "Second parameter must be a callback").ThrowAsJavaScriptException(); + return env.Null(); + } + + Napi::Function cb = info[1].As(); + + std::string topic = GetParameter(config, "topic", ""); + bool allTopics = GetParameter(config, "allTopics", true); + int timeout_ms = GetParameter(config, "timeout", 30000); + + Napi::FunctionReference* callback = new Napi::FunctionReference(); + *callback = Napi::Persistent(cb); + + Napi::AsyncWorker::Queue(new Workers::ConnectionMetadata( + callback, obj, topic, timeout_ms, allTopics)); + + return env.Null(); + } + + Napi::Value NodeOffsetsForTimes(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + + if (info.Length() < 3 || !info[0].IsArray()) { + // Just throw an exception + Napi::Error::New(env, "Need to specify an array of topic partitions").ThrowAsJavaScriptException(); + return env.Null(); + } + + std::vector toppars = + Conversion::TopicPartition::FromV8Array(info[0].As()); + + int timeout_ms; + Napi::Maybe maybeTimeout = + info[1].As(.As().Uint32Value()); + + if (maybeTimeout.IsNothing()) { + timeout_ms = 1000; + } else { + timeout_ms = static_cast(maybeTimeout); + } + + Napi::Function cb = info[2].As(); + Napi::FunctionReference callback = Napi::Persistent(cb); + + Connection* handle = this; + + Napi::AsyncQueueWorker( + new Workers::Handle::OffsetsForTimes(callback, handle, + toppars, timeout_ms)); + + return env.Null(); + } + + Napi::Value NodeQueryWatermarkOffsets(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + + Connection* obj = ObjectWrap::Unwrap(info.This()); + + if (!info[0].IsString()) { + Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException(); + ; + return; + } + + if (!info[1].IsNumber()) { + Napi::Error::New(env, "2nd parameter must be a partition number").ThrowAsJavaScriptException(); + return env.Null(); + } + + if (!info[2].IsNumber()) { + Napi::Error::New(env, "3rd parameter must be a number of milliseconds").ThrowAsJavaScriptException(); + return env.Null(); + } + + if (!info[3].IsFunction()) { + Napi::Error::New(env, "4th parameter must be a callback").ThrowAsJavaScriptException(); + return env.Null(); + } + + // Get string pointer for the topic name + std::string topicUTF8 = info[0].As(.To()); + // The first parameter is the topic + std::string topic_name(*topicUTF8); + + // Second parameter is the partition + int32_t partition = info[1].As().Int32Value(); + + // Third parameter is the timeout + int timeout_ms = info[2].As().Int32Value(); + + // Fourth parameter is the callback + Napi::Function cb = 
info[3].As(); + Napi::FunctionReference *callback = new Napi::FunctionReference(cb); + + Napi::AsyncQueueWorker(new Workers::ConnectionQueryWatermarkOffsets( + callback, obj, topic_name, partition, timeout_ms)); + + return env.Null(); + } + + Napi::Value NodeSetSaslCredentials(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + if (!info[0].IsString()) { + Napi::Error::New(env, "1st parameter must be a username string").ThrowAsJavaScriptException(); + return env.Null(); + } + + if (!info[1].IsString()) { + Napi::Error::New(env, "2nd parameter must be a password string").ThrowAsJavaScriptException(); + return env.Null(); + } + + // Get string pointer for the username + std::string usernameUTF8 = info[0].As(.To()); + // The first parameter is the username + std::string username(*usernameUTF8); + + // Get string pointer for the password + std::string passwordUTF8 = info[1].As(.To()); + // The first parameter is the password + std::string password(*passwordUTF8); + + Connection* obj = ObjectWrap::Unwrap(info.This()); + Baton b = obj->SetSaslCredentials(username, password); + + if (b.err() != RdKafka::ERR_NO_ERROR) { + Napi::Value errorObject = b.ToObject(); + Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); + return env.Null(); + } + + return env.Null(); + } + + + // Node methods + Napi::Value NodeConfigureCallbacks(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Napi::HandleScope scope(env); + + if (info.Length() < 2 || + !info[0].IsBoolean() || + !info[1].IsObject()) { + // Just throw an exception + Napi::Error::New(env, "Need to specify a callbacks object").ThrowAsJavaScriptException(); + return env.Null(); + } + v8::Local context = Napi::GetCurrentContext(); + Connection* obj = ObjectWrap::Unwrap(info.This()); + + const bool add = info[0].As().Value().ToChecked(); + Napi::Object configs_object = + info[1].ToObject(context); + Napi::Array configs_property_names = + configs_object->GetOwnPropertyNames(context); + + for (unsigned int j = 0; j < configs_property_names->Length(); ++j) { + std::string configs_string_key; + + Napi::Value configs_key = + (configs_property_names).Get(j); + Napi::Value configs_value = + (configs_object).Get(configs_key); + + int config_type = 0; + if (configs_value.IsObject() && configs_key.IsString()) { + std::string configs_utf8_key = configs_key.As(); + configs_string_key = std::string(*configs_utf8_key); + if (configs_string_key.compare("global") == 0) { + config_type = 1; + } else if (configs_string_key.compare("topic") == 0) { + config_type = 2; + } else if (configs_string_key.compare("event") == 0) { + config_type = 3; + } else { + continue; + } + } else { + continue; + } + + Napi::Object object = + configs_value->ToObject(context); + Napi::Array property_names = + object->GetOwnPropertyNames(context); + + for (unsigned int i = 0; i < property_names->Length(); ++i) { + std::string errstr; + std::string string_key; + + Napi::Value key = (property_names).Get(i); + Napi::Value value = (object).Get(key); + + if (key.IsString()) { + std::string utf8_key = key.As(); + string_key = std::string(*utf8_key); + } else { + continue; + } + + if (value->IsFunction()) { + Napi::Function cb = value.As(); + switch (config_type) { + case 1: + obj->m_gconfig->ConfigureCallback(string_key, cb, add, errstr); + if (!errstr.empty()) { + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return env.Null(); + } + break; + case 2: + obj->m_tconfig->ConfigureCallback(string_key, cb, add, errstr); + if (!errstr.empty()) 
{ + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return env.Null(); + } + break; + case 3: + obj->ConfigureCallback(string_key, cb, add); + break; + } + } + } + } + + return env.True(); + } + + Napi::Value NodeSetOAuthBearerToken(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + if (!info[0].IsString()) { + Napi::Error::New(env, "1st parameter must be a token string").ThrowAsJavaScriptException(); + return env.Null(); + } + + if (!info[1].IsNumber()) { + Napi::Error::New(env, "2nd parameter must be a lifetime_ms number").ThrowAsJavaScriptException(); + return env.Null(); + } + + if (!info[2].IsString()) { + Napi::Error::New(env, "3rd parameter must be a principal_name string").ThrowAsJavaScriptException(); + return env.Null(); + } + + if (!info[3].IsNullOrUndefined() && !info[3].IsArray()) { + Napi::Error::New(env, "4th parameter must be an extensions array or null").ThrowAsJavaScriptException(); + return env.Null(); + } + + // Get string pointer for the token + std::string tokenUtf8 = info[0].As(.To()); + std::string token(*tokenUtf8); + + // Get the lifetime_ms + int64_t lifetime_ms = info[1].As().Int64Value(); + + // Get string pointer for the principal_name + std::string principal_nameUtf8 = + info[2].As(.To()); + std::string principal_name(*principal_nameUtf8); + + // Get the extensions (if any) + std::list extensions; + if (!info[3].IsNullOrUndefined()) { + Napi::Array extensionsArray = info[3].As(); + extensions = v8ArrayToStringList(extensionsArray); + } + + Connection* obj = ObjectWrap::Unwrap(info.This()); + Baton b = + obj->SetOAuthBearerToken(token, lifetime_ms, principal_name, extensions); + + if (b.err() != RdKafka::ERR_NO_ERROR) { + Napi::Value errorObject = b.ToObject(); + Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); + return env.Null(); + } + + return env.Null(); + } + + Napi::Value NodeSetOAuthBearerTokenFailure(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + if (!info[0].IsString()) { + Napi::Error::New(env, "1st parameter must be an error string").ThrowAsJavaScriptException(); + return env.Null(); + } + + // Get string pointer for the error string + std::string errstrUtf8 = info[0].As(.To()); + std::string errstr(*errstrUtf8); + + Connection* obj = ObjectWrap::Unwrap(info.This()); + Baton b = obj->SetOAuthBearerTokenFailure(errstr); + + if (b.err() != RdKafka::ERR_NO_ERROR) { + Napi::Value errorObject = b.ToObject(); + Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); + return env.Null(); + } + + return env.Null(); + } - static Napi::Value NodeConfigureCallbacks(const Napi::CallbackInfo& info); - static Napi::Value NodeGetMetadata(const Napi::CallbackInfo& info); - static Napi::Value NodeQueryWatermarkOffsets(const Napi::CallbackInfo& info); - static Napi::Value NodeOffsetsForTimes(const Napi::CallbackInfo& info); - static Napi::Value NodeSetSaslCredentials(const Napi::CallbackInfo& info); - static Napi::Value NodeSetOAuthBearerToken(const Napi::CallbackInfo& info); - static Napi::Value NodeSetOAuthBearerTokenFailure(const Napi::CallbackInfo& info); - static Napi::Value NodeName(const Napi::CallbackInfo& info); + Napi::Value NodeName(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + Connection* obj = ObjectWrap::Unwrap(info.This()); + std::string name = obj->Name(); + return Napi::New(env, name); + } + }; } // namespace NodeKafka diff --git a/src/kafka-consumer.cc b/src/kafka-consumer.cc index 0a2c7650..8f9550e1 100644 --- a/src/kafka-consumer.cc +++ 
b/src/kafka-consumer.cc @@ -15,8 +15,6 @@ #include "src/kafka-consumer.h" #include "src/workers.h" -using Napi::FunctionCallbackInfo; - namespace NodeKafka { /** @@ -524,71 +522,74 @@ std::string KafkaConsumer::RebalanceProtocol() { Napi::FunctionReference KafkaConsumer::constructor; -void KafkaConsumer::Init(Napi::Object exports) { +void KafkaConsumer::Init(Napi::Env env, Napi::Object exports) { Napi::HandleScope scope(env); - Napi::FunctionReference tpl = Napi::Function::New(env, New); - tpl->SetClassName(Napi::String::New(env, "KafkaConsumer")); - + Napi::Function KafkaConsumer = DefineClass(env, "KafkaConsumer", { + /* + * Lifecycle events inherited from NodeKafka::Connection + * + * @sa NodeKafka::Connection + */ + InstanceMethod("configureCallbacks", &KafkaConsumer::NodeConfigureCallbacks), /* - * Lifecycle events inherited from NodeKafka::Connection - * - * @sa NodeKafka::Connection + * @brief Methods to do with establishing state */ - InstanceMethod("configureCallbacks", &NodeConfigureCallbacks), - + InstanceMethod("connect", &KafkaConsumer::NodeConnect), + InstanceMethod("disconnect", &KafkaConsumer::NodeDisconnect), + InstanceMethod("getMetadata", &KafkaConsumer::NodeGetMetadata), + InstanceMethod("queryWatermarkOffsets", &KafkaConsumer::NodeQueryWatermarkOffsets), // NOLINT + InstanceMethod("offsetsForTimes", &KafkaConsumer::NodeOffsetsForTimes), + InstanceMethod("getWatermarkOffsets", &KafkaConsumer::NodeGetWatermarkOffsets), + InstanceMethod("setSaslCredentials", &KafkaConsumer::NodeSetSaslCredentials), + InstanceMethod("setOAuthBearerToken", &KafkaConsumer::NodeSetOAuthBearerToken), + StaticMethod("setOAuthBearerTokenFailure", &KafkaConsumer::NodeSetOAuthBearerTokenFailure), + + /* + * @brief Methods exposed to do with message retrieval + */ + InstanceMethod("subscription", &KafkaConsumer::NodeSubscription), + InstanceMethod("subscribe", &KafkaConsumer::NodeSubscribe), + InstanceMethod("unsubscribe", &KafkaConsumer::NodeUnsubscribe), + InstanceMethod("consumeLoop", &KafkaConsumer::NodeConsumeLoop), + InstanceMethod("consume", &KafkaConsumer::NodeConsume), + InstanceMethod("seek", &KafkaConsumer::NodeSeek), + + + /** + * @brief Pausing and resuming + */ + InstanceMethod("pause", &KafkaConsumer::NodePause), + InstanceMethod("resume", &KafkaConsumer::NodeResume), + + /* - * @brief Methods to do with establishing state + * @brief Methods to do with partition assignment / rebalancing */ - InstanceMethod("connect", &NodeConnect), - InstanceMethod("disconnect", &NodeDisconnect), - InstanceMethod("getMetadata", &NodeGetMetadata), - InstanceMethod("queryWatermarkOffsets", &NodeQueryWatermarkOffsets), // NOLINT - InstanceMethod("offsetsForTimes", &NodeOffsetsForTimes), - InstanceMethod("getWatermarkOffsets", &NodeGetWatermarkOffsets), - InstanceMethod("setSaslCredentials", &NodeSetSaslCredentials), - InstanceMethod("setOAuthBearerToken", &NodeSetOAuthBearerToken), - Napi::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", - NodeSetOAuthBearerTokenFailure); + InstanceMethod("committed", &KafkaConsumer::NodeCommitted), + InstanceMethod("position", &KafkaConsumer::NodePosition), + InstanceMethod("assign", &KafkaConsumer::NodeAssign), + InstanceMethod("unassign", &KafkaConsumer::NodeUnassign), + InstanceMethod("incrementalAssign", &KafkaConsumer::NodeIncrementalAssign), + InstanceMethod("incrementalUnassign", &KafkaConsumer::NodeIncrementalUnassign), + InstanceMethod("assignments", &KafkaConsumer::NodeAssignments), + InstanceMethod("assignmentLost", &KafkaConsumer::NodeAssignmentLost), 
+ InstanceMethod("rebalanceProtocol", &KafkaConsumer::NodeRebalanceProtocol), - /* - * @brief Methods exposed to do with message retrieval - */ - InstanceMethod("subscription", &NodeSubscription), - InstanceMethod("subscribe", &NodeSubscribe), - InstanceMethod("unsubscribe", &NodeUnsubscribe), - InstanceMethod("consumeLoop", &NodeConsumeLoop), - InstanceMethod("consume", &NodeConsume), - InstanceMethod("seek", &NodeSeek), - - /** - * @brief Pausing and resuming - */ - InstanceMethod("pause", &NodePause), - InstanceMethod("resume", &NodeResume), + InstanceMethod("commit", &KafkaConsumer::NodeCommit), + InstanceMethod("commitSync", &KafkaConsumer::NodeCommitSync), + InstanceMethod("commitCb", &KafkaConsumer::NodeCommitCb), + InstanceMethod("offsetsStore", &KafkaConsumer::NodeOffsetsStore), + InstanceMethod("offsetsStoreSingle", &KafkaConsumer::NodeOffsetsStoreSingle), + }); - /* - * @brief Methods to do with partition assignment / rebalancing - */ - InstanceMethod("committed", &NodeCommitted), - InstanceMethod("position", &NodePosition), - InstanceMethod("assign", &NodeAssign), - InstanceMethod("unassign", &NodeUnassign), - InstanceMethod("incrementalAssign", &NodeIncrementalAssign), - InstanceMethod("incrementalUnassign", &NodeIncrementalUnassign), - InstanceMethod("assignments", &NodeAssignments), - InstanceMethod("assignmentLost", &NodeAssignmentLost), - InstanceMethod("rebalanceProtocol", &NodeRebalanceProtocol), - - InstanceMethod("commit", &NodeCommit), - InstanceMethod("commitSync", &NodeCommitSync), - InstanceMethod("commitCb", &NodeCommitCb), - InstanceMethod("offsetsStore", &NodeOffsetsStore), - InstanceMethod("offsetsStoreSingle", &NodeOffsetsStoreSingle), + // Napi::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", + // NodeSetOAuthBearerTokenFailure); + constructor.Reset((tpl->GetFunction(Napi::GetCurrentContext())) ); diff --git a/src/kafka-consumer.h b/src/kafka-consumer.h index 774dd811..84a537f0 100644 --- a/src/kafka-consumer.h +++ b/src/kafka-consumer.h @@ -36,10 +36,10 @@ namespace NodeKafka { * @sa NodeKafka::Client */ -class KafkaConsumer : public Connection { +class KafkaConsumer : public Connection { friend class Producer; public: - static void Init(Napi::Object); + static void Init(Napi::Env env, Napi::Object); static Napi::Object NewInstance(Napi::Value); Baton Connect(); @@ -115,32 +115,32 @@ class KafkaConsumer : public Connection { RdKafka::KafkaConsumer *m_consumer = nullptr; // Node methods - static Napi::Value NodeConnect(const Napi::CallbackInfo& info); - static Napi::Value NodeSubscribe(const Napi::CallbackInfo& info); - static Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); - static Napi::Value NodeAssign(const Napi::CallbackInfo& info); - static Napi::Value NodeUnassign(const Napi::CallbackInfo& info); - static Napi::Value NodeIncrementalAssign(const Napi::CallbackInfo& info); - static Napi::Value NodeIncrementalUnassign(const Napi::CallbackInfo& info); - static Napi::Value NodeAssignments(const Napi::CallbackInfo& info); - static Napi::Value NodeAssignmentLost(const Napi::CallbackInfo& info); - static Napi::Value NodeRebalanceProtocol(const Napi::CallbackInfo& info); - static Napi::Value NodeUnsubscribe(const Napi::CallbackInfo& info); - static Napi::Value NodeCommit(const Napi::CallbackInfo& info); - static Napi::Value NodeCommitSync(const Napi::CallbackInfo& info); - static Napi::Value NodeCommitCb(const Napi::CallbackInfo& info); - static Napi::Value NodeOffsetsStore(const Napi::CallbackInfo& info); - static Napi::Value 
NodeOffsetsStoreSingle(const Napi::CallbackInfo& info); - static Napi::Value NodeCommitted(const Napi::CallbackInfo& info); - static Napi::Value NodePosition(const Napi::CallbackInfo& info); - static Napi::Value NodeSubscription(const Napi::CallbackInfo& info); - static Napi::Value NodeSeek(const Napi::CallbackInfo& info); - static Napi::Value NodeGetWatermarkOffsets(const Napi::CallbackInfo& info); - static Napi::Value NodeConsumeLoop(const Napi::CallbackInfo& info); - static Napi::Value NodeConsume(const Napi::CallbackInfo& info); - - static Napi::Value NodePause(const Napi::CallbackInfo& info); - static Napi::Value NodeResume(const Napi::CallbackInfo& info); + Napi::Value NodeConnect(const Napi::CallbackInfo& info); + Napi::Value NodeSubscribe(const Napi::CallbackInfo& info); + Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); + Napi::Value NodeAssign(const Napi::CallbackInfo& info); + Napi::Value NodeUnassign(const Napi::CallbackInfo& info); + Napi::Value NodeIncrementalAssign(const Napi::CallbackInfo& info); + Napi::Value NodeIncrementalUnassign(const Napi::CallbackInfo& info); + Napi::Value NodeAssignments(const Napi::CallbackInfo& info); + Napi::Value NodeAssignmentLost(const Napi::CallbackInfo& info); + Napi::Value NodeRebalanceProtocol(const Napi::CallbackInfo& info); + Napi::Value NodeUnsubscribe(const Napi::CallbackInfo& info); + Napi::Value NodeCommit(const Napi::CallbackInfo& info); + Napi::Value NodeCommitSync(const Napi::CallbackInfo& info); + Napi::Value NodeCommitCb(const Napi::CallbackInfo& info); + Napi::Value NodeOffsetsStore(const Napi::CallbackInfo& info); + Napi::Value NodeOffsetsStoreSingle(const Napi::CallbackInfo& info); + Napi::Value NodeCommitted(const Napi::CallbackInfo& info); + Napi::Value NodePosition(const Napi::CallbackInfo& info); + Napi::Value NodeSubscription(const Napi::CallbackInfo& info); + Napi::Value NodeSeek(const Napi::CallbackInfo& info); + Napi::Value NodeGetWatermarkOffsets(const Napi::CallbackInfo& info); + Napi::Value NodeConsumeLoop(const Napi::CallbackInfo& info); + Napi::Value NodeConsume(const Napi::CallbackInfo& info); + + Napi::Value NodePause(const Napi::CallbackInfo& info); + Napi::Value NodeResume(const Napi::CallbackInfo& info); }; } // namespace NodeKafka diff --git a/src/producer.h b/src/producer.h index 0a9c9374..5b079c8f 100644 --- a/src/producer.h +++ b/src/producer.h @@ -49,7 +49,7 @@ class ProducerMessage { bool m_is_empty; }; -class Producer : public Connection { +class Producer : public Connection { public: static void Init(Napi::Object); static Napi::Object NewInstance(Napi::Value); diff --git a/src/topic.h b/src/topic.h index 085e3800..f9bba3d6 100644 --- a/src/topic.h +++ b/src/topic.h @@ -17,6 +17,7 @@ #include "rdkafkacpp.h" // NOLINT #include "src/config.h" +#include "src/connection.h" namespace NodeKafka { @@ -25,7 +26,7 @@ class Topic : public Napi::ObjectWrap { static void Init(Napi::Object); static Napi::Object NewInstance(Napi::Value arg); - Baton toRDKafkaTopic(Connection *handle); + template Baton toRDKafkaTopic(Connection *handle); protected: static Napi::FunctionReference constructor; From 58c78d1e42c0ab84c04e3283960f91f510023d94 Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Tue, 25 Mar 2025 08:05:28 -0500 Subject: [PATCH 05/14] fix ErrorAwareWorker --- src/admin.h | 2 +- src/workers.h | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/admin.h b/src/admin.h index a4bba54e..06825416 100644 --- a/src/admin.h +++ b/src/admin.h @@ -37,7 +37,7 @@ namespace 
NodeKafka { * @sa NodeKafka::Client */ -class AdminClient : public Connection { +class AdminClient : public Connection { public: static void Init(Napi::Object); static Napi::Object NewInstance(Napi::Value); diff --git a/src/workers.h b/src/workers.h index 9fad8c20..3fd7674d 100644 --- a/src/workers.h +++ b/src/workers.h @@ -30,24 +30,22 @@ namespace Workers { class ErrorAwareWorker : public Napi::AsyncWorker { public: explicit ErrorAwareWorker(Napi::FunctionReference* callback_) : - Napi::AsyncWorker(callback_), + Napi::AsyncWorker(callback_->Value()), m_baton(RdKafka::ERR_NO_ERROR) {} virtual ~ErrorAwareWorker() {} virtual void Execute() = 0; virtual void OnOK() = 0; - void OnError() { + void OnError(const Napi::Error &e) { + Napi::Env env = e.Env(); Napi::HandleScope scope(env); // Construct error and add code to it. - Napi::Value error = Napi::Error::New(env, ErrorMessage()); - (error.As()).Set(Napi::String::New(env, "code"), - Napi::New(env, GetErrorCode())); + Napi::Error error = Napi::Error::New(env, e.Message()); + (error.Value().As()).Set(Napi::String::New(env, "code"), + Napi::Number::New(env, GetErrorCode())); - const unsigned int argc = 1; - Napi::Value argv[argc] = { error }; - - callback->Call(argc, argv); + Napi::AsyncWorker::OnError(error); } protected: @@ -60,7 +58,7 @@ class ErrorAwareWorker : public Napi::AsyncWorker { } void SetErrorBaton(const NodeKafka::Baton & baton) { m_baton = baton; - SetErrorMessage(m_baton.errstr().c_str()); + SetError(m_baton.errstr().c_str()); } int GetErrorCode() { From e23e5f26f91ebf7bc7c021b7da7aaa9ae2b65cc0 Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Tue, 25 Mar 2025 09:03:57 -0500 Subject: [PATCH 06/14] add back static setOauthBearerFailure method --- src/connection.h | 2 +- src/kafka-consumer.cc | 47 ++++++++++++++++++++----------------------- 2 files changed, 23 insertions(+), 26 deletions(-) diff --git a/src/connection.h b/src/connection.h index 4521d695..2217ea5d 100644 --- a/src/connection.h +++ b/src/connection.h @@ -697,7 +697,7 @@ template class Connection : public Napi::ObjectWrap { return env.Null(); } - Napi::Value NodeSetOAuthBearerTokenFailure(const Napi::CallbackInfo &info) { + static Napi::Value NodeSetOAuthBearerTokenFailure(const Napi::CallbackInfo &info) { Napi::Env env = info.Env(); if (!info[0].IsString()) { Napi::Error::New(env, "1st parameter must be an error string").ThrowAsJavaScriptException(); diff --git a/src/kafka-consumer.cc b/src/kafka-consumer.cc index 8f9550e1..740361d9 100644 --- a/src/kafka-consumer.cc +++ b/src/kafka-consumer.cc @@ -533,10 +533,10 @@ void KafkaConsumer::Init(Napi::Env env, Napi::Object exports) { */ InstanceMethod("configureCallbacks", &KafkaConsumer::NodeConfigureCallbacks), - /* - * @brief Methods to do with establishing state - */ + /* + * @brief Methods to do with establishing state + */ InstanceMethod("connect", &KafkaConsumer::NodeConnect), InstanceMethod("disconnect", &KafkaConsumer::NodeDisconnect), InstanceMethod("getMetadata", &KafkaConsumer::NodeGetMetadata), @@ -565,30 +565,27 @@ void KafkaConsumer::Init(Napi::Env env, Napi::Object exports) { InstanceMethod("resume", &KafkaConsumer::NodeResume), - /* - * @brief Methods to do with partition assignment / rebalancing - */ - - InstanceMethod("committed", &KafkaConsumer::NodeCommitted), - InstanceMethod("position", &KafkaConsumer::NodePosition), - InstanceMethod("assign", &KafkaConsumer::NodeAssign), - InstanceMethod("unassign", &KafkaConsumer::NodeUnassign), - InstanceMethod("incrementalAssign", 
&KafkaConsumer::NodeIncrementalAssign), - InstanceMethod("incrementalUnassign", &KafkaConsumer::NodeIncrementalUnassign), - InstanceMethod("assignments", &KafkaConsumer::NodeAssignments), - InstanceMethod("assignmentLost", &KafkaConsumer::NodeAssignmentLost), - InstanceMethod("rebalanceProtocol", &KafkaConsumer::NodeRebalanceProtocol), - - InstanceMethod("commit", &KafkaConsumer::NodeCommit), - InstanceMethod("commitSync", &KafkaConsumer::NodeCommitSync), - InstanceMethod("commitCb", &KafkaConsumer::NodeCommitCb), - InstanceMethod("offsetsStore", &KafkaConsumer::NodeOffsetsStore), - InstanceMethod("offsetsStoreSingle", &KafkaConsumer::NodeOffsetsStoreSingle), - }); + /* + * @brief Methods to do with partition assignment / rebalancing + */ + InstanceMethod("committed", &KafkaConsumer::NodeCommitted), + InstanceMethod("position", &KafkaConsumer::NodePosition), + InstanceMethod("assign", &KafkaConsumer::NodeAssign), + InstanceMethod("unassign", &KafkaConsumer::NodeUnassign), + InstanceMethod("incrementalAssign", &KafkaConsumer::NodeIncrementalAssign), + InstanceMethod("incrementalUnassign", &KafkaConsumer::NodeIncrementalUnassign), + InstanceMethod("assignments", &KafkaConsumer::NodeAssignments), + InstanceMethod("assignmentLost", &KafkaConsumer::NodeAssignmentLost), + InstanceMethod("rebalanceProtocol", &KafkaConsumer::NodeRebalanceProtocol), + + InstanceMethod("commit", &KafkaConsumer::NodeCommit), + InstanceMethod("commitSync", &KafkaConsumer::NodeCommitSync), + InstanceMethod("commitCb", &KafkaConsumer::NodeCommitCb), + InstanceMethod("offsetsStore", &KafkaConsumer::NodeOffsetsStore), + InstanceMethod("offsetsStoreSingle", &KafkaConsumer::NodeOffsetsStoreSingle), + }); - // Napi::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", - // NodeSetOAuthBearerTokenFailure); constructor.Reset((tpl->GetFunction(Napi::GetCurrentContext())) From 548443e6a1a0c8c2234f62c48b9618946e53b510 Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Tue, 25 Mar 2025 23:17:58 -0500 Subject: [PATCH 07/14] Sort out error problems --- src/common.h | 2 - src/connection.h | 11 +- src/errors.cc | 23 ++-- src/errors.h | 14 +- src/kafka-consumer.cc | 310 +++++++++++++++++++----------------------- src/kafka-consumer.h | 5 +- src/workers.h | 64 ++++----- 7 files changed, 201 insertions(+), 228 deletions(-) diff --git a/src/common.h b/src/common.h index 0a393f4e..4d500003 100644 --- a/src/common.h +++ b/src/common.h @@ -22,8 +22,6 @@ #include "rdkafkacpp.h" // NOLINT #include "rdkafka.h" // NOLINT -#include "src/errors.h" - typedef std::vector BrokerMetadataList; typedef std::vector PartitionMetadataList; typedef std::vector TopicMetadataList; diff --git a/src/connection.h b/src/connection.h index 2217ea5d..ba94c035 100644 --- a/src/connection.h +++ b/src/connection.h @@ -283,7 +283,7 @@ template class Connection : public Napi::ObjectWrap { protected: - Connection(const Napi::CallbackInfo &info) { + Connection(const Napi::CallbackInfo &info): m_event_cb() { Napi::Env env = info.Env(); if (!info.IsConstructCall()) { Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); @@ -294,10 +294,11 @@ template class Connection : public Napi::ObjectWrap { } } - Connection(Conf* gconfig, Conf* tconfig): - m_event_cb(), - m_gconfig(gconfig), - m_tconfig(tconfig) { + + void Config(Conf *gconfig, Conf *tconfig) { + this->m_gconfig = gconfig; + this->m_tconfig = tconfig; + std::string errstr; m_client = NULL; diff --git a/src/errors.cc b/src/errors.cc index d0a68efe..91fb11fd 100644 --- 
a/src/errors.cc +++ b/src/errors.cc @@ -14,11 +14,11 @@ namespace NodeKafka { -Napi::Object RdKafkaError(const RdKafka::ErrorCode &err, +Napi::Error RdKafkaError(const Napi::Env& env, const RdKafka::ErrorCode &err, const std::string &errstr) { int code = static_cast(err); - Napi::Object ret = Napi::Object::New(env); + Napi::Error ret = Napi::Error::New(env); (ret).Set(Napi::String::New(env, "message"), Napi::String::New(env, errstr)); @@ -28,15 +28,16 @@ Napi::Object RdKafkaError(const RdKafka::ErrorCode &err, return ret; } -Napi::Object RdKafkaError(const RdKafka::ErrorCode &err) { +Napi::Error RdKafkaError(const Napi::Env& env, const RdKafka::ErrorCode &err) { std::string errstr = RdKafka::err2str(err); - return RdKafkaError(err, errstr); + return RdKafkaError(env, err, errstr); } -Napi::Object RdKafkaError( +Napi::Error RdKafkaError( + const Napi::Env& env, const RdKafka::ErrorCode &err, std::string errstr, bool isFatal, bool isRetriable, bool isTxnRequiresAbort) { - Napi::Object ret = RdKafkaError(err, errstr); + Napi::Error ret = RdKafkaError(env, err, errstr); (ret).Set(Napi::String::New(env, "isFatal"), Napi::Boolean::New(env, isFatal)); @@ -92,16 +93,16 @@ Baton Baton::BatonFromErrorAndDestroy(RdKafka::Error *error) { return Baton(err, errstr); } -Napi::Object Baton::ToObject() { +Napi::Error Baton::ToError(const Napi::Env& env) { if (m_errstr.empty()) { - return RdKafkaError(m_err); + return RdKafkaError(env, m_err); } else { - return RdKafkaError(m_err, m_errstr); + return RdKafkaError(env, m_err, m_errstr); } } -Napi::Object Baton::ToTxnObject() { - return RdKafkaError(m_err, m_errstr, m_isFatal, m_isRetriable, m_isTxnRequiresAbort); // NOLINT +Napi::Error Baton::ToTxnError(const Napi::Env& env) { + return RdKafkaError(env, m_err, m_errstr, m_isFatal, m_isRetriable, m_isTxnRequiresAbort); // NOLINT } RdKafka::ErrorCode Baton::err() { diff --git a/src/errors.h b/src/errors.h index 4538f844..d173212b 100644 --- a/src/errors.h +++ b/src/errors.h @@ -40,8 +40,8 @@ class Baton { RdKafka::ErrorCode err(); std::string errstr(); - Napi::Object ToObject(); - Napi::Object ToTxnObject(); + Napi::Error ToError(const Napi::Env &env); + Napi::Error ToTxnError(const Napi::Env &env); private: void* m_data; @@ -51,10 +51,12 @@ class Baton { bool m_isRetriable; bool m_isTxnRequiresAbort; }; - -Napi::Object RdKafkaError(const RdKafka::ErrorCode &); -Napi::Object RdKafkaError(const RdKafka::ErrorCode &, - const std::string &); + +Napi::Error RdKafkaError(const Napi::Env &env, const RdKafka::ErrorCode &); +Napi::Error RdKafkaError(const Napi::Env &env, const RdKafka::ErrorCode &, const std::string &); +Napi::Error RdKafkaError(const Napi::Env &env, const RdKafka::ErrorCode &err, std::string errstr, + bool isFatal, bool isRetriable, + bool isTxnRequiresAbort); } // namespace NodeKafka diff --git a/src/kafka-consumer.cc b/src/kafka-consumer.cc index 740361d9..3af86803 100644 --- a/src/kafka-consumer.cc +++ b/src/kafka-consumer.cc @@ -12,6 +12,7 @@ #include #include +#include "src/errors.h" #include "src/kafka-consumer.h" #include "src/workers.h" @@ -27,16 +28,55 @@ namespace NodeKafka { * @sa NodeKafka::Client */ -KafkaConsumer::KafkaConsumer(Conf* gconfig, Conf* tconfig): - Connection(gconfig, tconfig) { - std::string errstr; +KafkaConsumer::KafkaConsumer(const Napi::CallbackInfo& info): Connection(info) { + Napi::Env env = info.Env(); + if (!info.IsConstructCall()) { + Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); + return; + } + + if (info.Length() 
< 2) { + Napi::Error::New(env, "You must supply global and topic configuration").ThrowAsJavaScriptException(); + return; + } + + if (!info[0].IsObject()) { + Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException(); + return; + } + + std::string errstr; + + Napi::Object i1 = info[0].ToObject(); + + Conf* gconfig = + Conf::create(RdKafka::Conf::CONF_GLOBAL, info[0].ToObject(), errstr); + + if (!gconfig) { + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return; + } - if (m_tconfig) - m_gconfig->set("default_topic_conf", m_tconfig, errstr); + // If tconfig isn't set, then just let us pick properties from gconf. + Conf* tconfig = nullptr; + if (info[1].IsObject()) { + tconfig = Conf::create(RdKafka::Conf::CONF_TOPIC, info[1].ToObject(), errstr); - m_consume_loop = nullptr; + if (!tconfig) { + delete gconfig; + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return; + } } + this->Config(gconfig, tconfig); + + if (m_tconfig) + m_gconfig->set("default_topic_conf", m_tconfig, errstr); + + m_consume_loop = nullptr; +} + KafkaConsumer::~KafkaConsumer() { // We only want to run this if it hasn't been run already Disconnect(); @@ -586,85 +626,28 @@ void KafkaConsumer::Init(Napi::Env env, Napi::Object exports) { InstanceMethod("offsetsStoreSingle", &KafkaConsumer::NodeOffsetsStoreSingle), }); - - - constructor.Reset((tpl->GetFunction(Napi::GetCurrentContext())) - ); - (exports).Set(Napi::String::New(env, "KafkaConsumer"), - (tpl->GetFunction(Napi::GetCurrentContext()))); + constructor.Reset(KafkaConsumer); + exports.Set(Napi::String::New(env, "KafkaConsumer"), KafkaConsumer); } -void KafkaConsumer::New(const Napi::CallbackInfo& info) { - Napi::Env env = info.Env(); - if (!info.IsConstructCall()) { - Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); - return env.Null(); - } - - if (info.Length() < 2) { - Napi::Error::New(env, "You must supply global and topic configuration").ThrowAsJavaScriptException(); - return env.Null(); - } - - if (!info[0].IsObject()) { - Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException(); - return env.Null(); - } +// Napi::Object KafkaConsumer::NewInstance(Napi::Value arg) { +// Napi::Env env = arg.Env(); +// Napi::EscapableHandleScope scope(env); - std::string errstr; - - Conf* gconfig = - Conf::create(RdKafka::Conf::CONF_GLOBAL, - (info[0].ToObject(Napi::GetCurrentContext())), errstr); +// const unsigned argc = 1; - if (!gconfig) { - Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); - return env.Null(); - } +// Napi::Value argv[argc] = { arg }; +// Napi::Function cons = Napi::Function::New(env, constructor); +// Napi::Object instance = +// Napi::NewInstance(cons, argc, argv); - // If tconfig isn't set, then just let us pick properties from gconf. - Conf* tconfig = nullptr; - if (info[1].IsObject()) { - tconfig = Conf::create(RdKafka::Conf::CONF_TOPIC, - (info[1].ToObject(Napi::GetCurrentContext())), errstr); - - if (!tconfig) { - delete gconfig; - Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); - return env.Null(); - } - } - - // TODO: fix this - this memory is leaked. 
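
The leak noted in the TODO below is what the constructor-based rewrite above removes: in node-addon-api, an instance constructed through `Napi::ObjectWrap<T>` is owned by its JavaScript wrapper and deleted by the finalizer when that object is collected, so there is no manual `new` + `Wrap` pair left to leak. A minimal sketch under that assumption (`MyClient` is an illustrative name, not part of this patch):

class MyClient : public Napi::ObjectWrap<MyClient> {
 public:
  // Invoked for `new MyClient(...)` from JS; the native instance is
  // deleted automatically when the wrapping JS object is garbage
  // collected, so no explicit delete is required.
  explicit MyClient(const Napi::CallbackInfo& info)
      : Napi::ObjectWrap<MyClient>(info) {}
};
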
- KafkaConsumer* consumer = new KafkaConsumer(gconfig, tconfig); - - // Wrap it - consumer->Wrap(info.This()); - - // Then there is some weird initialization that happens - // basically it sets the configuration data - // we don't need to do that because we lazy load it - - return info.This(); -} - -Napi::Object KafkaConsumer::NewInstance(Napi::Value arg) { - Napi::Env env = arg.Env(); - Napi::EscapableHandleScope scope(env); - - const unsigned argc = 1; - - Napi::Value argv[argc] = { arg }; - Napi::Function cons = Napi::Function::New(env, constructor); - Napi::Object instance = - Napi::NewInstance(cons, argc, argv); - - return scope.Escape(instance); -} +// return scope.Escape(instance); +// } /* Node exposed methods */ -Napi::Value KafkaConsumer::NodeCommitted(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeCommitted(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3 || !info[0].IsArray()) { @@ -718,11 +701,10 @@ Napi::Value KafkaConsumer::NodeSubscription(const Napi::CallbackInfo& info) { delete topics; } -Napi::Value KafkaConsumer::NodePosition(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodePosition(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - if (info.Length() < 1 || !info[0].IsArray()) { // Just throw an exception Napi::Error::New(env, "Need to specify an array of topic partitions").ThrowAsJavaScriptException(); @@ -732,12 +714,12 @@ Napi::Value KafkaConsumer::NodePosition(const Napi::CallbackInfo& info) { std::vector toppars = Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->Position(toppars); + Baton b = this->Position(toppars); if (b.err() != RdKafka::ErrorCode::ERR_NO_ERROR) { // Let the JS library throw if we need to so the error can be more rich int error_code = static_cast(b.err()); - return return Napi::Number::New(env, error_code); + return Napi::Number::New(env, error_code); } return @@ -748,40 +730,40 @@ Napi::Value KafkaConsumer::NodePosition(const Napi::CallbackInfo& info) { } Napi::Value KafkaConsumer::NodeAssignments(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - Baton b = consumer->RefreshAssignments(); + Baton b = this->RefreshAssignments(); if (b.err() != RdKafka::ERR_NO_ERROR) { // Let the JS library throw if we need to so the error can be more rich int error_code = static_cast(b.err()); - return return Napi::Number::New(env, error_code); + return Napi::Number::New(env, error_code); } return - Conversion::TopicPartition::ToV8Array(consumer->m_partitions); + Conversion::TopicPartition::ToV8Array(this->m_partitions); } -Napi::Value KafkaConsumer::NodeAssignmentLost(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeAssignmentLost(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - - Baton b = consumer->AssignmentLost(); + Baton b = this->AssignmentLost(); bool lost = b.data(); return Napi::Boolean::New(env, lost); } -Napi::Value KafkaConsumer::NodeRebalanceProtocol(const Napi::CallbackInfo& info) { - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - std::string protocol = consumer->RebalanceProtocol(); +Napi::Value KafkaConsumer::NodeRebalanceProtocol(const Napi::CallbackInfo &info) { + 
Napi::Env env = info.Env(); + std::string protocol = this->RebalanceProtocol(); return Napi::String::New(env, protocol); } Napi::Value KafkaConsumer::NodeAssign(const Napi::CallbackInfo& info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsArray()) { @@ -793,13 +775,10 @@ Napi::Value KafkaConsumer::NodeAssign(const Napi::CallbackInfo& info) { Napi::Array partitions = info[0].As(); std::vector topic_partitions; - for (unsigned int i = 0; i < partitions->Length(); ++i) { - Napi::Value partition_obj_value; - if (!( - (partitions).Get(i).ToLocal(&partition_obj_value) && - partition_obj_value.IsObject())) { + for (unsigned int i = 0; i < partitions.Length(); ++i) { + Napi::Value partition_obj_value = partitions.Get(i); + if (!partition_obj_value.IsObject()) { Napi::Error::New(env, "Must pass topic-partition objects").ThrowAsJavaScriptException(); - } Napi::Object partition_obj = partition_obj_value.As(); @@ -829,41 +808,38 @@ Napi::Value KafkaConsumer::NodeAssign(const Napi::CallbackInfo& info) { } } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - // Hand over the partitions to the consumer. - Baton b = consumer->Assign(topic_partitions); + Baton b = this->Assign(topic_partitions); if (b.err() != RdKafka::ERR_NO_ERROR) { Napi::Error::New(env, RdKafka::err2str(b.err()).c_str()).ThrowAsJavaScriptException(); } - return env.True(); + return Napi::Value::From(env, true); } -Napi::Value KafkaConsumer::NodeUnassign(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeUnassign(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - - - if (!consumer->IsClosing() && !consumer->IsConnected()) { + if (!this->IsClosing() && !this->IsConnected()) { Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException(); return env.Null(); } - Baton b = consumer->Unassign(); + Baton b = this->Unassign(); if (b.err() != RdKafka::ERR_NO_ERROR) { Napi::Error::New(env, RdKafka::err2str(b.err()).c_str()).ThrowAsJavaScriptException(); } - return env.True(); + return Napi::Value::From(env, true); } -Napi::Value KafkaConsumer::NodeIncrementalAssign(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeIncrementalAssign(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsArray()) { @@ -875,13 +851,11 @@ Napi::Value KafkaConsumer::NodeIncrementalAssign(const Napi::CallbackInfo& info) Napi::Array partitions = info[0].As(); std::vector topic_partitions; - for (unsigned int i = 0; i < partitions->Length(); ++i) { - Napi::Value partition_obj_value; - if (!( - (partitions).Get(i).ToLocal(&partition_obj_value) && - partition_obj_value.IsObject())) { + for (unsigned int i = 0; i < partitions.Length(); ++i) { + Napi::Value partition_obj_value = partitions.Get(i); + if (!partition_obj_value.IsObject()) { Napi::Error::New(env, "Must pass topic-partition objects").ThrowAsJavaScriptException(); - + return env.Null(); } Napi::Object partition_obj = partition_obj_value.As(); @@ -911,21 +885,18 @@ Napi::Value KafkaConsumer::NodeIncrementalAssign(const Napi::CallbackInfo& info) } } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - // Hand over the partitions to the consumer. 
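// NOTE: a substitution repeated throughout this file: handlers registered
// with InstanceMethod() are member functions, so the explicit unwrap of
// info.This() is dropped. Schematically:
//
//   // before (NAN):
//   KafkaConsumer* consumer = ObjectWrap::Unwrap<KafkaConsumer>(info.This());
//   Baton b = consumer->IncrementalAssign(topic_partitions);
//   // after (N-API):
//   Baton b = this->IncrementalAssign(topic_partitions);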
- Baton b = consumer->IncrementalAssign(topic_partitions); + Baton b = this->IncrementalAssign(topic_partitions); if (b.err() != RdKafka::ERR_NO_ERROR) { - Napi::Value errorObject = b.ToObject(); - Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); - + b.ToError(env).ThrowAsJavaScriptException(); } - return env.True(); + return Napi::Value::From(env, true); } -Napi::Value KafkaConsumer::NodeIncrementalUnassign(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeIncrementalUnassign(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsArray()) { @@ -937,13 +908,11 @@ Napi::Value KafkaConsumer::NodeIncrementalUnassign(const Napi::CallbackInfo& inf Napi::Array partitions = info[0].As(); std::vector topic_partitions; - for (unsigned int i = 0; i < partitions->Length(); ++i) { - Napi::Value partition_obj_value; - if (!( - (partitions).Get(i).ToLocal(&partition_obj_value) && - partition_obj_value.IsObject())) { + for (unsigned int i = 0; i < partitions.Length(); ++i) { + Napi::Value partition_obj_value = partitions.Get(i); + if (!partition_obj_value.IsObject()) { Napi::Error::New(env, "Must pass topic-partition objects").ThrowAsJavaScriptException(); - + return env.Null(); } Napi::Object partition_obj = partition_obj_value.As(); @@ -973,50 +942,47 @@ Napi::Value KafkaConsumer::NodeIncrementalUnassign(const Napi::CallbackInfo& inf } } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - // Hand over the partitions to the consumer. - Baton b = consumer->IncrementalUnassign(topic_partitions); + Baton b = this->IncrementalUnassign(topic_partitions); if (b.err() != RdKafka::ERR_NO_ERROR) { - Napi::Value errorObject = b.ToObject(); - Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); + Napi::Error errorObject = b.ToError(env); + errorObject.ThrowAsJavaScriptException(); } - return env.True(); + return Napi::Value::From(env, true); } -Napi::Value KafkaConsumer::NodeUnsubscribe(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeUnsubscribe(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - - Baton b = consumer->Unsubscribe(); + Baton b = this->Unsubscribe(); - return Napi::Number::New(env, static_cast(b.err())); + return Napi::Value::From(env, b.err()); } -Napi::Value KafkaConsumer::NodeCommit(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeCommit(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); int error_code; - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - if (!consumer->IsConnected()) { + if (!this->IsConnected()) { Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException(); return env.Null(); } if (info[0].IsNull() || info[0].IsUndefined()) { - Baton b = consumer->Commit(); + Baton b = this->Commit(); error_code = static_cast(b.err()); } else if (info[0].IsArray()) { std::vector toppars = Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->Commit(toppars); + Baton b = this->Commit(toppars); error_code = static_cast(b.err()); RdKafka::TopicPartition::destroy(toppars); @@ -1029,7 +995,7 @@ Napi::Value KafkaConsumer::NodeCommit(const Napi::CallbackInfo& info) { return env.Null(); } - Baton b = consumer->Commit(toppar); + Baton b = this->Commit(toppar); error_code = static_cast(b.err()); delete toppar; @@ -1041,25 
+1007,24 @@ Napi::Value KafkaConsumer::NodeCommit(const Napi::CallbackInfo& info) { return Napi::Number::New(env, error_code); } -Napi::Value KafkaConsumer::NodeCommitSync(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeCommitSync(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); int error_code; - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - - if (!consumer->IsConnected()) { + if (!this->IsConnected()) { Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException(); return env.Null(); } if (info[0].IsNull() || info[0].IsUndefined()) { - Baton b = consumer->CommitSync(); + Baton b = this->CommitSync(); error_code = static_cast(b.err()); } else if (info[0].IsArray()) { std::vector toppars = Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->CommitSync(toppars); + Baton b = this->CommitSync(toppars); error_code = static_cast(b.err()); RdKafka::TopicPartition::destroy(toppars); @@ -1072,7 +1037,7 @@ Napi::Value KafkaConsumer::NodeCommitSync(const Napi::CallbackInfo& info) { return env.Null(); } - Baton b = consumer->CommitSync(toppar); + Baton b = this->CommitSync(toppar); error_code = static_cast(b.err()); delete toppar; @@ -1084,15 +1049,14 @@ Napi::Value KafkaConsumer::NodeCommitSync(const Napi::CallbackInfo& info) { return Napi::Number::New(env, error_code); } -Napi::Value KafkaConsumer::NodeCommitCb(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeCommitCb(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); int error_code; std::optional> toppars = std::nullopt; Napi::FunctionReference *callback; - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - - if (!consumer->IsConnected()) { + if (!this->IsConnected()) { Napi::Error::New(env, "KafkaConsumer is disconnected").ThrowAsJavaScriptException(); return env.Null(); } @@ -1105,25 +1069,30 @@ Napi::Value KafkaConsumer::NodeCommitCb(const Napi::CallbackInfo& info) { if (!( (info[0].IsArray() || info[0].IsNull()) && info[1].IsFunction())) { - Napi::ThrowError( - "First argument should be an array or null and second one a callback"); - return; + Napi::Error::New(env, + "First argument should be an array or null and second one a callback").ThrowAsJavaScriptException(); + return env.Null(); } if (info[0].IsArray()) { toppars = Conversion::TopicPartition::FromV8Array(info[0].As()); } - callback = new Napi::FunctionReference(info[1].As()); - Napi::AsyncQueueWorker( - new Workers::KafkaConsumerCommitCb(callback, consumer, - toppars)); + callback = new Napi::FunctionReference(); + callback->Reset(info[1].As()); + + + Workers::KafkaConsumerCommitCb *worker = + new Workers::KafkaConsumerCommitCb(callback, this, toppars); + + worker->Queue(); return env.Null(); } -Napi::Value KafkaConsumer::NodeSubscribe(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeSubscribe(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsArray()) { @@ -1132,19 +1101,18 @@ Napi::Value KafkaConsumer::NodeSubscribe(const Napi::CallbackInfo& info) { return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - Napi::Array topicsArray = info[0].As(); std::vector topics = Conversion::Util::ToStringVector(topicsArray); - Baton b = consumer->Subscribe(topics); + Baton b = this->Subscribe(topics); int error_code = static_cast(b.err()); return Napi::Number::New(env, 
error_code); } -Napi::Value KafkaConsumer::NodeSeek(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeSeek(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, diff --git a/src/kafka-consumer.h b/src/kafka-consumer.h index 84a537f0..7608690c 100644 --- a/src/kafka-consumer.h +++ b/src/kafka-consumer.h @@ -40,7 +40,7 @@ class KafkaConsumer : public Connection { friend class Producer; public: static void Init(Napi::Env env, Napi::Object); - static Napi::Object NewInstance(Napi::Value); + // static Napi::Object NewInstance(Napi::Value); Baton Connect(); Baton Disconnect(); @@ -97,7 +97,8 @@ class KafkaConsumer : public Connection { static Napi::FunctionReference constructor; static void New(const Napi::CallbackInfo& info); - KafkaConsumer(Conf *, Conf *); + KafkaConsumer(const Napi::CallbackInfo& info); + // KafkaConsumer(Conf *, Conf *); ~KafkaConsumer(); private: diff --git a/src/workers.h b/src/workers.h index 3fd7674d..09699618 100644 --- a/src/workers.h +++ b/src/workers.h @@ -65,8 +65,8 @@ class ErrorAwareWorker : public Napi::AsyncWorker { return m_baton.err(); } - Napi::Object GetErrorObject() { - return m_baton.ToObject(); + Napi::Error GetErrorObject(const Napi::Env &env) { + return m_baton.ToError(env); } Baton m_baton; @@ -91,9 +91,10 @@ class MessageWorker : public ErrorAwareWorker { } void WorkMessage() { - if (!callback) { - return; - } + // TODO: is callback ever NULL? + // if (!callback) { + // return; + // } std::vector message_queue; std::vector warning_queue; @@ -157,7 +158,7 @@ class MessageWorker : public ErrorAwareWorker { uv_async_send(m_async); } - inline static NAUV_WORK_CB(m_async_message) { + inline static void m_async_message(uv_async_t *async) { MessageWorker *worker = static_cast(async->data); worker->WorkMessage(); } @@ -175,36 +176,37 @@ class MessageWorker : public ErrorAwareWorker { }; namespace Handle { -class OffsetsForTimes : public ErrorAwareWorker { - public: - OffsetsForTimes(Napi::FunctionReference*, NodeKafka::Connection*, - std::vector &, - const int &); - ~OffsetsForTimes(); - - void Execute(); - void OnOK(); - void OnError(); - - private: - NodeKafka::Connection * m_handle; - std::vector m_topic_partitions; - const int m_timeout_ms; -}; + using NodeKafka::Connection; + template class OffsetsForTimes : public ErrorAwareWorker { + public: + OffsetsForTimes(Napi::FunctionReference*, Connection*, + std::vector &, + const int &); + ~OffsetsForTimes(); + + void Execute(); + void OnOK(); + void OnError(); + + private: + Connection * m_handle; + std::vector m_topic_partitions; + const int m_timeout_ms; + }; } // namespace Handle -class ConnectionMetadata : public ErrorAwareWorker { - public: - ConnectionMetadata(Napi::FunctionReference*, NodeKafka::Connection*, - std::string, int, bool); +template class ConnectionMetadata : public ErrorAwareWorker { +public: + ConnectionMetadata(Napi::FunctionReference*, Connection*, + std::string, int, bool); ~ConnectionMetadata(); void Execute(); void OnOK(); void OnError(); - private: - NodeKafka::Connection * m_connection; +private: + Connection * m_connection; std::string m_topic; int m_timeout_ms; bool m_all_topics; @@ -212,9 +214,9 @@ class ConnectionMetadata : public ErrorAwareWorker { RdKafka::Metadata* m_metadata; }; -class ConnectionQueryWatermarkOffsets : public ErrorAwareWorker { +template class ConnectionQueryWatermarkOffsets : public ErrorAwareWorker { public: - 
ConnectionQueryWatermarkOffsets(Napi::FunctionReference*, NodeKafka::Connection*, + ConnectionQueryWatermarkOffsets(Napi::FunctionReference*, Connection*, std::string, int32_t, int); ~ConnectionQueryWatermarkOffsets(); @@ -223,7 +225,7 @@ class ConnectionQueryWatermarkOffsets : public ErrorAwareWorker { void OnError(); private: - NodeKafka::Connection * m_connection; + Connection * m_connection; std::string m_topic; int32_t m_partition; int m_timeout_ms; From 2dbfe798c626e3167bd82a67da6b6e5445d0fbaa Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Tue, 25 Mar 2025 23:50:36 -0500 Subject: [PATCH 08/14] Finish converting KafkaConsumer --- src/kafka-consumer.cc | 240 +++++++++++++++++++----------------------- 1 file changed, 107 insertions(+), 133 deletions(-) diff --git a/src/kafka-consumer.cc b/src/kafka-consumer.cc index 3af86803..b8645ca5 100644 --- a/src/kafka-consumer.cc +++ b/src/kafka-consumer.cc @@ -660,38 +660,33 @@ Napi::Value KafkaConsumer::NodeCommitted(const Napi::CallbackInfo &info) { Conversion::TopicPartition::FromV8Array(info[0].As()); int timeout_ms; - Napi::Maybe maybeTimeout = - info[1].As(.As().Uint32Value()); + uint32_t maybeTimeout = + info[1].As().Uint32Value(); - if (maybeTimeout.IsNothing()) { - timeout_ms = 1000; - } else { - timeout_ms = static_cast(maybeTimeout); - } + timeout_ms = static_cast(maybeTimeout); Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + Napi::AsyncWorker *worker = + new Workers::KafkaConsumerCommitted(callback, this, toppars, timeout_ms); - Napi::AsyncQueueWorker( - new Workers::KafkaConsumerCommitted(callback, consumer, - toppars, timeout_ms)); + worker->Queue(); return env.Null(); } -Napi::Value KafkaConsumer::NodeSubscription(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeSubscription(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - - Baton b = consumer->Subscription(); + Baton b = this->Subscription(); if (b.err() != RdKafka::ErrorCode::ERR_NO_ERROR) { // Let the JS library throw if we need to so the error can be more rich int error_code = static_cast(b.err()); - return return Napi::Number::New(env, error_code); + return Napi::Number::New(env, error_code); } std::vector * topics = b.data*>(); @@ -1138,21 +1133,19 @@ Napi::Value KafkaConsumer::NodeSeek(const Napi::CallbackInfo &info) { } int timeout_ms; - Napi::Maybe maybeTimeout = - info[1].As(.As().Uint32Value()); + // Nan::Maybe maybeTimeout = + // Nan::To(info[1].As()); + uint32_t maybeTimeout = + info[1].As().Uint32Value(); - if (maybeTimeout.IsNothing()) { - timeout_ms = 1000; - } else { - timeout_ms = static_cast(maybeTimeout); - // Do not allow timeouts of less than 10. Providing 0 causes segfaults - // because it makes it asynchronous. - if (timeout_ms < 10) { - timeout_ms = 10; - } - } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + timeout_ms = static_cast(maybeTimeout); + // Do not allow timeouts of less than 10. Providing 0 causes segfaults + // because it makes it asynchronous. 
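// NOTE: Nan::To<uint32_t>() produced a Nan::Maybe that had to be tested with
// IsNothing() before use; Napi::Number::Uint32Value() returns the coerced
// uint32_t directly. That is why the 1000 ms fallback branch disappears here
// and in the other timeout conversions in this patch (assuming the JS layer
// always passes a numeric timeout, as the existing argument checks imply).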
+ if (timeout_ms < 10) { + timeout_ms = 10; + + } const RdKafka::TopicPartition * toppar = Conversion::TopicPartition::FromV8Object(info[0].As()); @@ -1162,14 +1155,20 @@ Napi::Value KafkaConsumer::NodeSeek(const Napi::CallbackInfo &info) { return env.Null(); } - Napi::FunctionReference *callback = new Napi::FunctionReference(info[2].As()); - Napi::AsyncQueueWorker( - new Workers::KafkaConsumerSeek(callback, consumer, toppar, timeout_ms)); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + + callback->Reset(info[2].As()); + + Napi::AsyncWorker *worker = + new Workers::KafkaConsumerSeek(callback, this, toppar, timeout_ms); + + worker->Queue(); return env.Null(); } -Napi::Value KafkaConsumer::NodeOffsetsStore(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeOffsetsStore(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, @@ -1184,33 +1183,33 @@ Napi::Value KafkaConsumer::NodeOffsetsStore(const Napi::CallbackInfo& info) { return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - std::vector toppars = Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->OffsetsStore(toppars); + Baton b = this->OffsetsStore(toppars); RdKafka::TopicPartition::destroy(toppars); int error_code = static_cast(b.err()); return Napi::Number::New(env, error_code); } -Napi::Value KafkaConsumer::NodeOffsetsStoreSingle(const Napi::CallbackInfo& info) { +Napi::Value +KafkaConsumer::NodeOffsetsStoreSingle(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, partition, // offset, and leader epoch), we can't call this. 
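// NOTE: node-addon-api has no one-call equivalent of Nan::ThrowError that
// also ends the handler, so every throw in this series becomes the two-step
// pattern visible in the hunk below:
//
//   Napi::Error::New(env, "message").ThrowAsJavaScriptException();
//   return env.Null();  // the explicit return is still required
//
// (PATCH 09 adds a NodeKafka::ThrowError() helper in src/errors.cc that folds
// these two steps together for later call sites.)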
if (info.Length() < 4) { - return Napi::ThrowError( - "Must provide topic, partition, offset and leaderEpoch"); + Napi::Error::New(env, + "Must provide topic, partition, offset and leaderEpoch") + .ThrowAsJavaScriptException(); + return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - // Get string pointer for the topic name - std::string topicUTF8 = info[0].As(.To()); - const std::string& topic_name(*topicUTF8); + std::string topicUTF8 = info[0].As().Utf8Value(); + const std::string& topic_name(topicUTF8); int64_t partition = info[1].As().Int64Value(); int64_t offset = info[2].As().Int64Value(); @@ -1221,7 +1220,7 @@ Napi::Value KafkaConsumer::NodeOffsetsStoreSingle(const Napi::CallbackInfo& info toppar->set_leader_epoch(leader_epoch); std::vector toppars = {toppar}; - Baton b = consumer->OffsetsStore(toppars); + Baton b = this->OffsetsStore(toppars); delete toppar; @@ -1229,7 +1228,8 @@ Napi::Value KafkaConsumer::NodeOffsetsStoreSingle(const Napi::CallbackInfo& info return Napi::Number::New(env, error_code); } -Napi::Value KafkaConsumer::NodePause(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodePause(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, @@ -1244,12 +1244,10 @@ Napi::Value KafkaConsumer::NodePause(const Napi::CallbackInfo& info) { return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - std::vector toppars = Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->Pause(toppars); + Baton b = this->Pause(toppars); RdKafka::TopicPartition::destroy(toppars); #if 0 @@ -1269,7 +1267,8 @@ Napi::Value KafkaConsumer::NodePause(const Napi::CallbackInfo& info) { return Napi::Number::New(env, error_code); } -Napi::Value KafkaConsumer::NodeResume(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeResume(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, @@ -1284,12 +1283,10 @@ Napi::Value KafkaConsumer::NodeResume(const Napi::CallbackInfo& info) { return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - std::vector toppars = Conversion::TopicPartition::FromV8Array(info[0].As()); - Baton b = consumer->Resume(toppars); + Baton b = this->Resume(toppars); // Now iterate through and delete these toppars for (std::vector::const_iterator it = toppars.begin(); // NOLINT @@ -1306,7 +1303,8 @@ Napi::Value KafkaConsumer::NodeResume(const Napi::CallbackInfo& info) { return Napi::Number::New(env, error_code); } -Napi::Value KafkaConsumer::NodeConsumeLoop(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeConsumeLoop(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3) { @@ -1331,48 +1329,39 @@ Napi::Value KafkaConsumer::NodeConsumeLoop(const Napi::CallbackInfo& info) { } int timeout_ms; - Napi::Maybe maybeTimeout = - info[0].As(.As().Uint32Value()); - - if (maybeTimeout.IsNothing()) { - timeout_ms = 1000; - } else { + uint32_t maybeTimeout = + info[0].As().Uint32Value(); timeout_ms = static_cast(maybeTimeout); - } int timeout_sleep_delay_ms; - Napi::Maybe maybeSleep = - info[1].As(.As().Uint32Value()); + uint32_t maybeSleep = + info[1].As().Uint32Value(); - if (maybeSleep.IsNothing()) { - timeout_sleep_delay_ms = 500; - } else { - 
timeout_sleep_delay_ms = static_cast(maybeSleep); - } + timeout_sleep_delay_ms = static_cast(maybeSleep); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - - if (consumer->m_consume_loop != nullptr) { + if (this->m_consume_loop != nullptr) { Napi::Error::New(env, "Consume was already called").ThrowAsJavaScriptException(); return env.Null(); } - if (!consumer->IsConnected()) { + if (!this->IsConnected()) { Napi::Error::New(env, "Connect must be called before consume").ThrowAsJavaScriptException(); return env.Null(); } Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - consumer->m_consume_loop = - new Workers::KafkaConsumerConsumeLoop(callback, consumer, timeout_ms, timeout_sleep_delay_ms); // NOLINT + this->m_consume_loop = + new Workers::KafkaConsumerConsumeLoop(callback, this, timeout_ms, timeout_sleep_delay_ms); // NOLINT return env.Null(); } -Napi::Value KafkaConsumer::NodeConsume(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeConsume(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 2) { @@ -1382,14 +1371,10 @@ Napi::Value KafkaConsumer::NodeConsume(const Napi::CallbackInfo& info) { } int timeout_ms; - Napi::Maybe maybeTimeout = - info[0].As(.As().Uint32Value()); + uint32_t maybeTimeout = + info[0].As().Uint32Value(); - if (maybeTimeout.IsNothing()) { - timeout_ms = 1000; - } else { - timeout_ms = static_cast(maybeTimeout); - } + timeout_ms = static_cast(maybeTimeout); if (info[1].IsNumber()) { if (!info[2].IsBoolean()) { @@ -1403,53 +1388,43 @@ Napi::Value KafkaConsumer::NodeConsume(const Napi::CallbackInfo& info) { } Napi::Number numMessagesNumber = info[1].As(); - Napi::Maybe numMessagesMaybe = numMessagesNumber.As().Uint32Value(); // NOLINT + uint32_t numMessages = numMessagesNumber.As().Uint32Value(); // NOLINT - uint32_t numMessages; - if (numMessagesMaybe.IsNothing()) { + if (numMessages == 0) { Napi::Error::New(env, "Parameter must be a number over 0").ThrowAsJavaScriptException(); return env.Null(); - } else { - numMessages = numMessagesMaybe; } Napi::Boolean isTimeoutOnlyForFirstMessageBoolean = info[2].As(); // NOLINT - Napi::Maybe isTimeoutOnlyForFirstMessageMaybe = + bool isTimeoutOnlyForFirstMessage = isTimeoutOnlyForFirstMessageBoolean.As().Value(); - bool isTimeoutOnlyForFirstMessage; - if (isTimeoutOnlyForFirstMessageMaybe.IsNothing()) { - Napi::Error::New(env, "Parameter must be a boolean").ThrowAsJavaScriptException(); - return env.Null(); - } else { - isTimeoutOnlyForFirstMessage = isTimeoutOnlyForFirstMessageMaybe; // NOLINT - } - - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - Napi::Function cb = info[3].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - Napi::AsyncQueueWorker( - new Workers::KafkaConsumerConsumeNum(callback, consumer, numMessages, timeout_ms, isTimeoutOnlyForFirstMessage)); // NOLINT + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + Napi::AsyncWorker *worker = new Workers::KafkaConsumerConsumeNum( + callback, this, numMessages, timeout_ms, isTimeoutOnlyForFirstMessage); + worker->Queue(); } else { if (!info[1].IsFunction()) { Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - Napi::Function cb = 
info[1].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - Napi::AsyncQueueWorker( - new Workers::KafkaConsumerConsume(callback, consumer, timeout_ms)); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + Napi::AsyncWorker* worker = new Workers::KafkaConsumerConsume(callback, this, timeout_ms); + worker->Queue(); } return env.Null(); } -Napi::Value KafkaConsumer::NodeConnect(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeConnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsFunction()) { @@ -1458,20 +1433,20 @@ Napi::Value KafkaConsumer::NodeConnect(const Napi::CallbackInfo& info) { return env.Null(); } - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - // Activate the dispatchers before the connection, as some callbacks may run // on the background thread. // We will deactivate them if the connection fails. - consumer->ActivateDispatchers(); - - Napi::FunctionReference *callback = new Napi::FunctionReference(info[0].As()); - new Workers::KafkaConsumerConnect(callback, consumer).Queue(); + this->ActivateDispatchers(); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(info[0].As()); + Napi::AsyncWorker* worker = new Workers::KafkaConsumerConnect(callback, this); + worker->Queue(); return env.Null(); } -Napi::Value KafkaConsumer::NodeDisconnect(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeDisconnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsFunction()) { @@ -1481,36 +1456,35 @@ Napi::Value KafkaConsumer::NodeDisconnect(const Napi::CallbackInfo& info) { } Napi::Function cb = info[0].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); Workers::KafkaConsumerConsumeLoop* consumeLoop = - (Workers::KafkaConsumerConsumeLoop*)consumer->m_consume_loop; + (Workers::KafkaConsumerConsumeLoop*)this->m_consume_loop; if (consumeLoop != nullptr) { // stop the consume loop consumeLoop->Close(); // cleanup the async worker - consumeLoop->WorkComplete(); + // consumeLoop->WorkComplete(); consumeLoop->Destroy(); - consumer->m_consume_loop = nullptr; + this->m_consume_loop = nullptr; } - Napi::AsyncQueueWorker( - new Workers::KafkaConsumerDisconnect(callback, consumer)); + Napi::AsyncWorker* worker = new Workers::KafkaConsumerDisconnect(callback, this); + + worker->Queue(); return env.Null(); } -Napi::Value KafkaConsumer::NodeGetWatermarkOffsets(const Napi::CallbackInfo& info) { +Napi::Value KafkaConsumer::NodeGetWatermarkOffsets(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); - KafkaConsumer* obj = ObjectWrap::Unwrap(info.This()); - if (!info[0].IsString()) { - Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException(); -; - return; + Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException(); + return env.Null(); } if (!info[1].IsNumber()) { @@ -1519,9 +1493,9 @@ Napi::Value KafkaConsumer::NodeGetWatermarkOffsets(const Napi::CallbackInfo& inf } // Get string pointer for the topic name - std::string topicUTF8 = info[0].As(.To()); + std::string topicUTF8 = info[0].As().Utf8Value(); // 
The first parameter is the topic - std::string topic_name(*topicUTF8); + std::string topic_name(topicUTF8); // Second parameter is the partition int32_t partition = info[1].As().Int32Value(); @@ -1530,13 +1504,13 @@ Napi::Value KafkaConsumer::NodeGetWatermarkOffsets(const Napi::CallbackInfo& inf int64_t low_offset; int64_t high_offset; - Baton b = obj->GetWatermarkOffsets( + Baton b = this->GetWatermarkOffsets( topic_name, partition, &low_offset, &high_offset); if (b.err() != RdKafka::ERR_NO_ERROR) { // Let the JS library throw if we need to so the error can be more rich int error_code = static_cast(b.err()); - return return Napi::Number::New(env, error_code); + return Napi::Number::New(env, error_code); } else { Napi::Object offsetsObj = Napi::Object::New(env); (offsetsObj).Set(Napi::String::New(env, "lowOffset"), @@ -1544,7 +1518,7 @@ Napi::Value KafkaConsumer::NodeGetWatermarkOffsets(const Napi::CallbackInfo& inf (offsetsObj).Set(Napi::String::New(env, "highOffset"), Napi::Number::New(env, high_offset)); - return return offsetsObj; + return offsetsObj; } } From bba76c9c2173c60597dd591e9d20c13df7a9dbaa Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Wed, 26 Mar 2025 09:42:26 -0500 Subject: [PATCH 09/14] producer: NAN -> N-API --- src/errors.cc | 6 + src/errors.h | 7 +- src/producer.cc | 429 +++++++++++++++++++++++------------------------- src/producer.h | 33 ++-- 4 files changed, 229 insertions(+), 246 deletions(-) diff --git a/src/errors.cc b/src/errors.cc index 91fb11fd..5799f85d 100644 --- a/src/errors.cc +++ b/src/errors.cc @@ -49,6 +49,12 @@ Napi::Error RdKafkaError( return ret; } +Napi::Value ThrowError(const Napi::Env& env, const std::string &message) { + Napi::Error error = Napi::Error::New(env, message); + error.ThrowAsJavaScriptException(); + return error.Value(); +} + Baton::Baton(const RdKafka::ErrorCode &code) { m_err = code; } diff --git a/src/errors.h b/src/errors.h index d173212b..a3f21ab9 100644 --- a/src/errors.h +++ b/src/errors.h @@ -54,9 +54,10 @@ class Baton { Napi::Error RdKafkaError(const Napi::Env &env, const RdKafka::ErrorCode &); Napi::Error RdKafkaError(const Napi::Env &env, const RdKafka::ErrorCode &, const std::string &); -Napi::Error RdKafkaError(const Napi::Env &env, const RdKafka::ErrorCode &err, std::string errstr, - bool isFatal, bool isRetriable, - bool isTxnRequiresAbort); +Napi::Error RdKafkaError(const Napi::Env &env, const RdKafka::ErrorCode &err, + std::string errstr, bool isFatal, bool isRetriable, + bool isTxnRequiresAbort); +Napi::Value ThrowError(const Napi::Env &env, const std::string &); } // namespace NodeKafka diff --git a/src/producer.cc b/src/producer.cc index 0e204628..4afd5e02 100644 --- a/src/producer.cc +++ b/src/producer.cc @@ -30,151 +30,122 @@ namespace NodeKafka { * @sa NodeKafka::Connection */ -Producer::Producer(Conf* gconfig, Conf* tconfig): - Connection(gconfig, tconfig), - m_dr_cb(), - m_partitioner_cb(), - m_is_background_polling(false) { - std::string errstr; +Producer::Producer(const Napi::CallbackInfo &info) + : Connection(info), m_dr_cb(), m_partitioner_cb(), + m_is_background_polling(false) { - if (m_tconfig) - m_gconfig->set("default_topic_conf", m_tconfig, errstr); - - m_gconfig->set("dr_cb", &m_dr_cb, errstr); - } - -Producer::~Producer() { - Disconnect(); -} - -Napi::FunctionReference Producer::constructor; - -void Producer::Init(Napi::Object exports) { - Napi::HandleScope scope(env); - - Napi::FunctionReference tpl = Napi::Function::New(env, New); - tpl->SetClassName(Napi::String::New(env, "Producer")); - - - 
/* - * Lifecycle events inherited from NodeKafka::Connection - * - * @sa NodeKafka::Connection - */ - - InstanceMethod("configureCallbacks", &NodeConfigureCallbacks), - - /* - * @brief Methods to do with establishing state - */ - - InstanceMethod("connect", &NodeConnect), - InstanceMethod("disconnect", &NodeDisconnect), - InstanceMethod("getMetadata", &NodeGetMetadata), - InstanceMethod("queryWatermarkOffsets", &NodeQueryWatermarkOffsets), // NOLINT - InstanceMethod("poll", &NodePoll), - InstanceMethod("setPollInBackground", &NodeSetPollInBackground), - InstanceMethod("setSaslCredentials", &NodeSetSaslCredentials), - InstanceMethod("setOAuthBearerToken", &NodeSetOAuthBearerToken), - Napi::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", - NodeSetOAuthBearerTokenFailure); - - /* - * @brief Methods exposed to do with message production - */ - - InstanceMethod("setPartitioner", &NodeSetPartitioner), - InstanceMethod("produce", &NodeProduce), - - InstanceMethod("flush", &NodeFlush), - - /* - * @brief Methods exposed to do with transactions - */ - - InstanceMethod("initTransactions", &NodeInitTransactions), - InstanceMethod("beginTransaction", &NodeBeginTransaction), - InstanceMethod("commitTransaction", &NodeCommitTransaction), - InstanceMethod("abortTransaction", &NodeAbortTransaction), - InstanceMethod("sendOffsetsToTransaction", &NodeSendOffsetsToTransaction), // NOLINT - - // connect. disconnect. resume. pause. get meta data - constructor.Reset((tpl->GetFunction(Napi::GetCurrentContext())) - ); - - (exports).Set(Napi::String::New(env, "Producer"), - tpl->GetFunction(Napi::GetCurrentContext())); -} - -void Producer::New(const Napi::CallbackInfo& info) { Napi::Env env = info.Env(); if (!info.IsConstructCall()) { Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); - return env.Null(); + return; } if (info.Length() < 2) { Napi::Error::New(env, "You must supply global and topic configuration").ThrowAsJavaScriptException(); - return env.Null(); + return; } if (!info[0].IsObject()) { Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException(); - return env.Null(); + return; } std::string errstr; Conf* gconfig = Conf::create(RdKafka::Conf::CONF_GLOBAL, - (info[0].ToObject(Napi::GetCurrentContext())), errstr); + (info[0].ToObject()), errstr); if (!gconfig) { Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); - return env.Null(); + return; } // If tconfig isn't set, then just let us pick properties from gconf. 
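// NOTE: as with KafkaConsumer, construction now happens inside the C++
// constructor: the Conf objects are built here and handed to the base class
// via this->Config(gconfig, tconfig), presumably a Connection helper
// introduced elsewhere in this series, rather than flowing through a separate
// static New(). The tconfig failure path deletes gconfig before throwing so
// the partially built configuration is not leaked.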
Conf* tconfig = nullptr; if (info[1].IsObject()) { tconfig = Conf::create( - RdKafka::Conf::CONF_TOPIC, - (info[1].ToObject(Napi::GetCurrentContext())), errstr); + RdKafka::Conf::CONF_TOPIC, + (info[1].ToObject()), errstr); if (!tconfig) { // No longer need this since we aren't instantiating anything delete gconfig; Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); - return env.Null(); + return; } } - Producer* producer = new Producer(gconfig, tconfig); + this->Config(gconfig, tconfig); - // Wrap it - producer->Wrap(info.This()); + if (m_tconfig) + m_gconfig->set("default_topic_conf", m_tconfig, errstr); - // Then there is some weird initialization that happens - // basically it sets the configuration data - // we don't need to do that because we lazy load it + m_gconfig->set("dr_cb", &m_dr_cb, errstr); +} - return info.This(); +Producer::~Producer() { + Disconnect(); } -Napi::Object Producer::NewInstance(Napi::Value arg) { - Napi::Env env = arg.Env(); - Napi::EscapableHandleScope scope(env); +Napi::FunctionReference Producer::constructor; + +void Producer::Init(const Napi::Env& env, Napi::Object exports) { + Napi::HandleScope scope(env); + + Napi::Function Producer = DefineClass(env, "Producer", { + /* + * Lifecycle events inherited from NodeKafka::Connection + * + * @sa NodeKafka::Connection + */ + + InstanceMethod("configureCallbacks", &Producer::NodeConfigureCallbacks), + + /* + * @brief Methods to do with establishing state + */ + + InstanceMethod("connect", &Producer::NodeConnect), + InstanceMethod("disconnect", &Producer::NodeDisconnect), + InstanceMethod("getMetadata", &Producer::NodeGetMetadata), + InstanceMethod("queryWatermarkOffsets", &Producer::NodeQueryWatermarkOffsets), // NOLINT + InstanceMethod("poll", &Producer::NodePoll), + InstanceMethod("setPollInBackground", &Producer::NodeSetPollInBackground), + InstanceMethod("setSaslCredentials", &Producer::NodeSetSaslCredentials), + InstanceMethod("setOAuthBearerToken", &Producer::NodeSetOAuthBearerToken), + StaticMethod("setOAuthBearerTokenFailure",&Producer::NodeSetOAuthBearerTokenFailure), + + /* + * @brief Methods exposed to do with message production + */ - const unsigned argc = 1; + InstanceMethod("setPartitioner", &Producer::NodeSetPartitioner), + InstanceMethod("produce", &Producer::NodeProduce), - Napi::Value argv[argc] = { arg }; - Napi::Function cons = Napi::Function::New(env, constructor); - Napi::Object instance = - Napi::NewInstance(cons, argc, argv); + InstanceMethod("flush", &Producer::NodeFlush), - return scope.Escape(instance); + /* + * @brief Methods exposed to do with transactions + */ + + InstanceMethod("initTransactions", &Producer::NodeInitTransactions), + InstanceMethod("beginTransaction", &Producer::NodeBeginTransaction), + InstanceMethod("commitTransaction", &Producer::NodeCommitTransaction), + InstanceMethod("abortTransaction", &Producer::NodeAbortTransaction), + InstanceMethod("sendOffsetsToTransaction", &Producer::NodeSendOffsetsToTransaction), // NOLINT + }); + + + + + // connect. disconnect. resume. pause. 
get meta data + constructor.Reset(Producer); + + exports.Set(Napi::String::New(env, "Producer"), Producer); } + Baton Producer::Connect() { if (IsConnected()) { return Baton(RdKafka::ERR_NO_ERROR); @@ -376,29 +347,29 @@ Baton Producer::SetPollInBackground(bool set) { return Baton(RdKafka::ERR_NO_ERROR); } -void Producer::ConfigureCallback(const std::string& string_key, - const Napi::Function& cb, bool add) { - if (string_key.compare("delivery_cb") == 0) { - if (add) { - bool dr_msg_cb = false; - Napi::String dr_msg_cb_key = Napi::String::New(env, "dr_msg_cb"); // NOLINT - if ((cb).Has(dr_msg_cb_key).FromMaybe(false)) { - Napi::Value v = (cb).Get(dr_msg_cb_key); - if (v->IsBoolean()) { - dr_msg_cb = v.As().Value().ToChecked(); - } - } - if (dr_msg_cb) { - this->m_dr_cb.SendMessageBuffer(true); - } - this->m_dr_cb.dispatcher.AddCallback(cb); - } else { - this->m_dr_cb.dispatcher.RemoveCallback(cb); - } - } else { - Connection::ConfigureCallback(string_key, cb, add); - } -} +// void Producer::ConfigureCallback(const std::string& string_key, +// const Napi::Function& cb, bool add) { +// if (string_key.compare("delivery_cb") == 0) { +// if (add) { +// bool dr_msg_cb = false; +// Napi::String dr_msg_cb_key = Napi::String::New(env, "dr_msg_cb"); // NOLINT +// if ((cb).Has(dr_msg_cb_key).FromMaybe(false)) { +// Napi::Value v = (cb).Get(dr_msg_cb_key); +// if (v->IsBoolean()) { +// dr_msg_cb = v.As().Value().ToChecked(); +// } +// } +// if (dr_msg_cb) { +// this->m_dr_cb.SendMessageBuffer(true); +// } +// this->m_dr_cb.dispatcher.AddCallback(cb); +// } else { +// this->m_dr_cb.dispatcher.RemoveCallback(cb); +// } +// } else { +// Connection::ConfigureCallback(string_key, cb, add); +// } +// } Baton Producer::InitTransactions(int32_t timeout_ms) { if (!IsConnected()) { @@ -482,7 +453,8 @@ Baton Producer::SendOffsetsToTransaction( * * @sa RdKafka::Producer::produce */ -Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { +Napi::Value Producer::NodeProduce(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); // Need to extract the message data here. @@ -516,8 +488,7 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { Napi::Error::New(env, "Message must be a buffer or null").ThrowAsJavaScriptException(); return env.Null(); } else { - Napi::Object message_buffer_object = - (info[2].ToObject(Napi::GetCurrentContext())); + Napi::Object message_buffer_object = info[2].ToObject(); // v8 handles the garbage collection here so we need to make a copy of // the buffer or assign the buffer to a persistent handle. @@ -548,8 +519,7 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { key_buffer_length = 0; key_buffer_data = NULL; } else if (info[3].IsBuffer()) { - Napi::Object key_buffer_object = - (info[3].ToObject(Napi::GetCurrentContext())); + Napi::Object key_buffer_object = info[3].ToObject(); // v8 handles the garbage collection here so we need to make a copy of // the buffer or assign the buffer to a persistent handle. @@ -570,10 +540,10 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { } } else { // If it was a string just use the utf8 value. 
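// NOTE: Nan::Utf8String was a wrapper that had to be dereferenced
// (*keyUTF8); Napi::String::Utf8Value() returns a std::string by value, so
// the replacement below copies the key bytes directly:
//
//   std::string keyUTF8 = val.Utf8Value();
//   key = new std::string(keyUTF8);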
- Napi::String val = info[3].To(); + Napi::String val = info[3].ToString(); // Get string pointer for this thing - std::string keyUTF8 = val.As(); - key = new std::string(*keyUTF8); + std::string keyUTF8 = val.Utf8Value(); + key = new std::string(keyUTF8); key_buffer_data = key->data(); key_buffer_length = key->length(); @@ -596,7 +566,7 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { // Opaque handling if (info.Length() > 5 && !info[5].IsUndefined()) { // We need to create a persistent handle - opaque = new Napi::Persistent(info[5]); + opaque = Napi::Persistent(info[5]); // To get the local from this later, // Napi::Object object = Napi::New(env, persistent); } @@ -605,43 +575,41 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { if (info.Length() > 6 && !info[6].IsUndefined()) { Napi::Array v8Headers = info[6].As(); - if (v8Headers->Length() >= 1) { - for (unsigned int i = 0; i < v8Headers->Length(); i++) { + if (v8Headers.Length() >= 1) { + for (unsigned int i = 0; i < v8Headers.Length(); i++) { Napi::Object header = (v8Headers).Get(i) - ->ToObject(Napi::GetCurrentContext()); + .ToObject(); if (header.IsEmpty()) { continue; } - Napi::Array props = header->GetOwnPropertyNames( - Napi::GetCurrentContext()); + Napi::Array props = header.GetPropertyNames(); // TODO: Other properties in the list of properties should not be // ignored, but they are. This is a bug, need to handle it either in JS // or here. - Napi::MaybeLocal v8Key = - (props).Get(0.To()); + Napi::MaybeOrValue jsKey = props.Get(Napi::Value::From(env, 0)); // The key must be a string. - if (v8Key.IsEmpty()) { + if (jsKey.IsEmpty()) { Napi::Error::New(env, "Header key must be a string").ThrowAsJavaScriptException(); } - std::string uKey = v8Key.ToLocalChecked(.As()); - std::string key(*uKey); + std::string uKey = jsKey.ToString().Utf8Value(); + std::string key(uKey); // Valid types for the header are string or buffer. // Other types will throw an error. 
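// NOTE: the v8 MaybeLocal/ToLocalChecked() handling is gone here:
// Napi::Object::Get() returns a Napi::Value directly (a failure surfaces as a
// pending JavaScript exception, or as a C++ Napi::Error when the addon is
// built with NAPI_CPP_EXCEPTIONS), so the header value is type-checked with
// IsBuffer()/IsString() in the lines that follow.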
Napi::Value v8Value = - (header).Get(v8Key); + (header).Get(jsKey); if (v8Value.IsBuffer()) { const char* value = v8Value.As>().Data(); const size_t value_len = v8Value.As>().Length(); headers.push_back(RdKafka::Headers::Header(key, value, value_len)); } else if (v8Value.IsString()) { - std::string uValue = v8Value.As(); - std::string value(*uValue); + std::string uValue = v8Value.As().Utf8Value(); + std::string value(uValue); headers.push_back( RdKafka::Headers::Header(key, value.c_str(), value.size())); } else { @@ -652,18 +620,17 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { } } - Producer* producer = ObjectWrap::Unwrap(info.This()); // Let the JS library throw if we need to so the error can be more rich int error_code; if (info[0].IsString()) { // Get string pointer for this thing - std::string topicUTF8 = info[0].As(.To()); - std::string topic_name(*topicUTF8); + std::string topicUTF8 = info[0].ToString().Utf8Value(); + std::string topic_name(topicUTF8); RdKafka::Headers *rd_headers = RdKafka::Headers::create(headers); - Baton b = producer->Produce(message_buffer_data, message_buffer_length, + Baton b = this->Produce(message_buffer_data, message_buffer_length, topic_name, partition, key_buffer_data, key_buffer_length, timestamp, opaque, rd_headers); @@ -673,21 +640,21 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { } } else { // First parameter is a topic OBJECT - Topic* topic = ObjectWrap::Unwrap(info[0].As()); + Topic* topic = ObjectWrap::Unwrap(info[0].As()); // Unwrap it and turn it into an RdKafka::Topic* - Baton topic_baton = topic->toRDKafkaTopic(producer); + Baton topic_baton = topic->toRDKafkaTopic(this); if (topic_baton.err() != RdKafka::ERR_NO_ERROR) { // Let the JS library throw if we need to so the error can be more rich error_code = static_cast(topic_baton.err()); - return return Napi::Number::New(env, error_code); + return Napi::Number::New(env, error_code); } RdKafka::Topic* rd_topic = topic_baton.data(); - Baton b = producer->Produce(message_buffer_data, message_buffer_length, + Baton b = this->Produce(message_buffer_data, message_buffer_length, rd_topic, partition, key_buffer_data, key_buffer_length, opaque); // Delete the topic when we are done. @@ -701,8 +668,8 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { // be a delivery report for it, so we have to clean up the opaque // data now, if there was any. 
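// NOTE: the opaque delivery-report handle is now a heap-allocated
// Napi::Reference rather than a Nan::Persistent; when the produce call fails
// and no delivery report will ever fire, it is released with the same
// Reset()-then-delete pair shown below.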
- Napi::Persistent *persistent = - static_cast *>(opaque); + Napi::Reference *persistent = + static_cast *>(opaque); persistent->Reset(); delete persistent; } @@ -714,7 +681,8 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo& info) { return Napi::Number::New(env, error_code); } -Napi::Value Producer::NodeSetPartitioner(const Napi::CallbackInfo& info) { +Napi::Value Producer::NodeSetPartitioner(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsFunction()) { @@ -723,13 +691,13 @@ Napi::Value Producer::NodeSetPartitioner(const Napi::CallbackInfo& info) { return env.Null(); } - Producer* producer = ObjectWrap::Unwrap(info.This()); Napi::Function cb = info[0].As(); - producer->m_partitioner_cb.SetCallback(cb); - return env.True(); + this->m_partitioner_cb.SetCallback(cb); + return Napi::Value::From(env, true); } -Napi::Value Producer::NodeConnect(const Napi::CallbackInfo& info) { +Napi::Value Producer::NodeConnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsFunction()) { @@ -740,50 +708,48 @@ Napi::Value Producer::NodeConnect(const Napi::CallbackInfo& info) { // This needs to be offloaded to libuv Napi::Function cb = info[0].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - - Producer* producer = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); // Activate the dispatchers before the connection, as some callbacks may run // on the background thread. // We will deactivate them if the connection fails. - producer->ActivateDispatchers(); + this->ActivateDispatchers(); - new Workers::ProducerConnect(callback, producer).Queue(); + (new Workers::ProducerConnect(callback, this))->Queue(); return env.Null(); } -Napi::Value Producer::NodePoll(const Napi::CallbackInfo& info) { +Napi::Value Producer::NodePoll(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); - Producer* producer = ObjectWrap::Unwrap(info.This()); - - if (!producer->IsConnected()) { + if (!this->IsConnected()) { Napi::Error::New(env, "Producer is disconnected").ThrowAsJavaScriptException(); - + return env.Null(); } else { - producer->Poll(); - return env.True(); + this->Poll(); + return Napi::Boolean::From(env, true); } } -Napi::Value Producer::NodeSetPollInBackground(const Napi::CallbackInfo& info) { +Napi::Value Producer::NodeSetPollInBackground(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsBoolean()) { // Just throw an exception - return Napi::ThrowError( - "Need to specify a boolean for setting or unsetting"); + Napi::Error::New(env, "Need to specify a boolean for setting or unsetting") + .ThrowAsJavaScriptException(); } bool set = info[0].As().Value(); - Producer* producer = ObjectWrap::Unwrap(info.This()); - Baton b = producer->SetPollInBackground(set); + Baton b = this->SetPollInBackground(set); if (b.err() != RdKafka::ERR_NO_ERROR) { Napi::Error::New(env, b.errstr().c_str()).ThrowAsJavaScriptException(); return env.Null(); } - return b.ToObject(); + return b.ToError(env).Value(); } Baton Producer::Flush(int timeout_ms) { @@ -803,7 +769,8 @@ Baton Producer::Flush(int timeout_ms) { return Baton(response_code); } -Napi::Value Producer::NodeFlush(const Napi::CallbackInfo& info) { +Napi::Value 
Producer::NodeFlush(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) { @@ -815,17 +782,17 @@ Napi::Value Producer::NodeFlush(const Napi::CallbackInfo& info) { int timeout_ms = info[0].As().Int32Value(); Napi::Function cb = info[1].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - - Producer* producer = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Napi::AsyncQueueWorker( - new Workers::ProducerFlush(callback, producer, timeout_ms)); + Napi::AsyncWorker* worker = new Workers::ProducerFlush(callback, this, timeout_ms); + worker->Queue(); return env.Null(); } -Napi::Value Producer::NodeDisconnect(const Napi::CallbackInfo& info) { +Napi::Value Producer::NodeDisconnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsFunction()) { @@ -836,15 +803,18 @@ Napi::Value Producer::NodeDisconnect(const Napi::CallbackInfo& info) { Napi::Function cb = info[0].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Producer* producer = ObjectWrap::Unwrap(info.This()); - new Workers::ProducerDisconnect(callback, producer).Queue(); + + Napi::AsyncWorker* worker = new Workers::ProducerDisconnect(callback, this); + worker->Queue(); return env.Null(); } -Napi::Value Producer::NodeInitTransactions(const Napi::CallbackInfo& info) { +Napi::Value Producer::NodeInitTransactions(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) { @@ -855,16 +825,17 @@ Napi::Value Producer::NodeInitTransactions(const Napi::CallbackInfo& info) { int timeout_ms = info[0].As().Int32Value(); Napi::Function cb = info[1].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Producer* producer = ObjectWrap::Unwrap(info.This()); - Napi::AsyncQueueWorker( - new Workers::ProducerInitTransactions(callback, producer, timeout_ms)); + Napi::AsyncWorker* worker = new Workers::ProducerInitTransactions(callback, this, timeout_ms); + worker->Queue(); return env.Null(); } -Napi::Value Producer::NodeBeginTransaction(const Napi::CallbackInfo& info) { +Napi::Value Producer::NodeBeginTransaction(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsFunction()) { @@ -873,15 +844,17 @@ Napi::Value Producer::NodeBeginTransaction(const Napi::CallbackInfo& info) { } Napi::Function cb = info[0].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Producer* producer = ObjectWrap::Unwrap(info.This()); - new Workers::ProducerBeginTransaction(callback, producer).Queue(); // NOLINT + Napi::AsyncWorker* worker = new Workers::ProducerBeginTransaction(callback, this); + worker->Queue(); return env.Null(); } -Napi::Value Producer::NodeCommitTransaction(const Napi::CallbackInfo& info) { +Napi::Value Producer::NodeCommitTransaction(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope 
scope(env); if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) { @@ -892,16 +865,17 @@ Napi::Value Producer::NodeCommitTransaction(const Napi::CallbackInfo& info) { int timeout_ms = info[0].As().Int32Value(); Napi::Function cb = info[1].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Producer* producer = ObjectWrap::Unwrap(info.This()); - Napi::AsyncQueueWorker( - new Workers::ProducerCommitTransaction(callback, producer, timeout_ms)); + Napi::AsyncWorker* worker = new Workers::ProducerCommitTransaction(callback, this, timeout_ms); + worker->Queue(); return env.Null(); } -Napi::Value Producer::NodeAbortTransaction(const Napi::CallbackInfo& info) { +Napi::Value Producer::NodeAbortTransaction(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 2 || !info[1].IsFunction() || !info[0].IsNumber()) { @@ -912,54 +886,57 @@ Napi::Value Producer::NodeAbortTransaction(const Napi::CallbackInfo& info) { int timeout_ms = info[0].As().Int32Value(); Napi::Function cb = info[1].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Producer* producer = ObjectWrap::Unwrap(info.This()); - Napi::AsyncQueueWorker( - new Workers::ProducerAbortTransaction(callback, producer, timeout_ms)); + Napi::AsyncWorker *worker = + new Workers::ProducerAbortTransaction(callback, this, timeout_ms); + worker->Queue(); return env.Null(); } -Napi::Value Producer::NodeSendOffsetsToTransaction(const Napi::CallbackInfo& info) { +Napi::Value +Producer::NodeSendOffsetsToTransaction(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 4) { - return Napi::ThrowError( + return ThrowError(env, "Need to specify offsets, consumer, timeout for 'send offsets to transaction', and callback"); // NOLINT } if (!info[0].IsArray()) { - return Napi::ThrowError( + return ThrowError(env, "First argument to 'send offsets to transaction' has to be a consumer object"); // NOLINT } if (!info[1].IsObject()) { - Napi::Error::New(env, "Kafka consumer must be provided").ThrowAsJavaScriptException(); + return ThrowError(env, "Kafka consumer must be provided"); } if (!info[2].IsNumber()) { - Napi::Error::New(env, "Timeout must be provided").ThrowAsJavaScriptException(); + return ThrowError(env, "Timeout must be provided"); } if (!info[3].IsFunction()) { - Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); - return env.Null(); + return ThrowError(env, "Need to specify a callback"); } - std::vector toppars = + std::vector toppars = Conversion::TopicPartition::FromV8Array(info[0].As()); - NodeKafka::KafkaConsumer* consumer = - ObjectWrap::Unwrap(info[1].As()); + + NodeKafka::KafkaConsumer *consumer = + ObjectWrap::Unwrap(info[1].As()); + int timeout_ms = info[2].As().Int32Value(); Napi::Function cb = info[3].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - - Producer* producer = ObjectWrap::Unwrap(info.This()); - Napi::AsyncQueueWorker(new Workers::ProducerSendOffsetsToTransaction( - callback, - producer, - toppars, - consumer, - timeout_ms)); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + Producer *producer = this; + + Napi::AsyncWorker *worker = new 
Workers::ProducerSendOffsetsToTransaction( + callback, producer, toppars, consumer, timeout_ms); + worker->Queue(); return env.Null(); } diff --git a/src/producer.h b/src/producer.h index 5b079c8f..48db04dc 100644 --- a/src/producer.h +++ b/src/producer.h @@ -51,8 +51,7 @@ class ProducerMessage { class Producer : public Connection { public: - static void Init(Napi::Object); - static Napi::Object NewInstance(Napi::Value); + static void Init(const Napi::Env&, Napi::Object); Baton Connect(); void Disconnect(); @@ -82,8 +81,8 @@ class Producer : public Connection { void ActivateDispatchers(); void DeactivateDispatchers(); - void ConfigureCallback(const std::string& string_key, - const Napi::Function& cb, bool add) override; + // void ConfigureCallback(const std::string& string_key, + // const Napi::Function& cb, bool add) override; Baton InitTransactions(int32_t timeout_ms); Baton BeginTransaction(); @@ -98,24 +97,24 @@ class Producer : public Connection { static Napi::FunctionReference constructor; static void New(const Napi::CallbackInfo&); - Producer(Conf*, Conf*); + Producer(const Napi::CallbackInfo& info); ~Producer(); private: - static Napi::Value NodeProduce(const Napi::CallbackInfo& info); - static Napi::Value NodeSetPartitioner(const Napi::CallbackInfo& info); - static Napi::Value NodeConnect(const Napi::CallbackInfo& info); - static Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); - static Napi::Value NodePoll(const Napi::CallbackInfo& info); - static Napi::Value NodeSetPollInBackground(const Napi::CallbackInfo& info); + Napi::Value NodeProduce(const Napi::CallbackInfo& info); + Napi::Value NodeSetPartitioner(const Napi::CallbackInfo& info); + Napi::Value NodeConnect(const Napi::CallbackInfo& info); + Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); + Napi::Value NodePoll(const Napi::CallbackInfo& info); + Napi::Value NodeSetPollInBackground(const Napi::CallbackInfo& info); #if RD_KAFKA_VERSION > 0x00090200 - static Napi::Value NodeFlush(const Napi::CallbackInfo& info); + Napi::Value NodeFlush(const Napi::CallbackInfo& info); #endif - static Napi::Value NodeInitTransactions(const Napi::CallbackInfo& info); - static Napi::Value NodeBeginTransaction(const Napi::CallbackInfo& info); - static Napi::Value NodeCommitTransaction(const Napi::CallbackInfo& info); - static Napi::Value NodeAbortTransaction(const Napi::CallbackInfo& info); - static Napi::Value NodeSendOffsetsToTransaction(const Napi::CallbackInfo& info); + Napi::Value NodeInitTransactions(const Napi::CallbackInfo& info); + Napi::Value NodeBeginTransaction(const Napi::CallbackInfo& info); + Napi::Value NodeCommitTransaction(const Napi::CallbackInfo& info); + Napi::Value NodeAbortTransaction(const Napi::CallbackInfo& info); + Napi::Value NodeSendOffsetsToTransaction(const Napi::CallbackInfo& info); Callbacks::Delivery m_dr_cb; Callbacks::Partitioner m_partitioner_cb; From 0ac2c22a4b57637d22c776bfa6583151372b5f5c Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Wed, 26 Mar 2025 15:29:04 -0500 Subject: [PATCH 10/14] admin client NAN -> NAPI --- src/admin.cc | 368 ++++++++++++++++++++++++------------------------- src/admin.h | 34 +++-- src/binding.cc | 6 +- src/topic.h | 2 +- 4 files changed, 198 insertions(+), 212 deletions(-) diff --git a/src/admin.cc b/src/admin.cc index 997a337e..a3175183 100644 --- a/src/admin.cc +++ b/src/admin.cc @@ -16,7 +16,7 @@ #include "src/workers.h" -using Napi::FunctionCallbackInfo; +using Napi::CallbackInfo; namespace NodeKafka { @@ -30,10 +30,6 @@ namespace NodeKafka { * @sa 
NodeKafka::Client */ -AdminClient::AdminClient(Conf *gconfig) : Connection(gconfig, NULL) {} - -AdminClient::AdminClient(Connection *connection) : Connection(connection) {} - AdminClient::~AdminClient() { Disconnect(); } @@ -100,56 +96,50 @@ Baton AdminClient::Disconnect() { Napi::FunctionReference AdminClient::constructor; -void AdminClient::Init(Napi::Object exports) { +void AdminClient::Init(const Napi::Env& env, Napi::Object exports) { Napi::HandleScope scope(env); - Napi::FunctionReference tpl = Napi::Function::New(env, New); - tpl->SetClassName(Napi::String::New(env, "AdminClient")); - - - // Inherited from NodeKafka::Connection - InstanceMethod("configureCallbacks", &NodeConfigureCallbacks), - InstanceMethod("name", &NodeName), - - // Admin client operations - InstanceMethod("createTopic", &NodeCreateTopic), - InstanceMethod("deleteTopic", &NodeDeleteTopic), - InstanceMethod("createPartitions", &NodeCreatePartitions), - InstanceMethod("deleteRecords", &NodeDeleteRecords), - InstanceMethod("describeTopics", &NodeDescribeTopics), - InstanceMethod("listOffsets", &NodeListOffsets), - - // Consumer group related operations - InstanceMethod("listGroups", &NodeListGroups), - InstanceMethod("describeGroups", &NodeDescribeGroups), - InstanceMethod("deleteGroups", &NodeDeleteGroups), - Napi::SetPrototypeMethod(tpl, "listConsumerGroupOffsets", - NodeListConsumerGroupOffsets); - - InstanceMethod("connect", &NodeConnect), - InstanceMethod("disconnect", &NodeDisconnect), - InstanceMethod("setSaslCredentials", &NodeSetSaslCredentials), - InstanceMethod("getMetadata", &NodeGetMetadata), - InstanceMethod("setOAuthBearerToken", &NodeSetOAuthBearerToken), - Napi::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", - NodeSetOAuthBearerTokenFailure); - - constructor.Reset( - (tpl->GetFunction(Napi::GetCurrentContext()))); - (exports).Set(Napi::String::New(env, "AdminClient"), - tpl->GetFunction(Napi::GetCurrentContext())); + Napi::Function AdminClient = DefineClass(env, "AdminClient", { + // Inherited from NodeKafka::Connection + InstanceMethod("configureCallbacks", &AdminClient::NodeConfigureCallbacks), + InstanceMethod("name", &AdminClient::NodeName), + InstanceMethod("setOAuthBearerToken", &AdminClient::NodeSetOAuthBearerToken), + StaticMethod("setOAuthBearerTokenFailure", + &NodeSetOAuthBearerTokenFailure), + + // Admin client operations + InstanceMethod("createTopic", &AdminClient::NodeCreateTopic), + InstanceMethod("deleteTopic", &AdminClient::NodeDeleteTopic), + InstanceMethod("createPartitions", &AdminClient::NodeCreatePartitions), + InstanceMethod("deleteRecords", &AdminClient::NodeDeleteRecords), + InstanceMethod("describeTopics", &AdminClient::NodeDescribeTopics), + InstanceMethod("listOffsets", &AdminClient::NodeListOffsets), + + // Consumer group related operations + InstanceMethod("listGroups", &AdminClient::NodeListGroups), + InstanceMethod("describeGroups", &AdminClient::NodeDescribeGroups), + InstanceMethod("deleteGroups", &AdminClient::NodeDeleteGroups), + InstanceMethod("listConsumerGroupOffsets",&AdminClient::NodeListConsumerGroupOffsets), + InstanceMethod("connect", &AdminClient::NodeConnect), + InstanceMethod("disconnect", &AdminClient::NodeDisconnect), + InstanceMethod("setSaslCredentials", &AdminClient::NodeSetSaslCredentials), + InstanceMethod("getMetadata", &AdminClient::NodeGetMetadata), + }); + + constructor.Reset(AdminClient); + exports.Set(Napi::String::New(env, "AdminClient"), AdminClient); } -void AdminClient::New(const Napi::CallbackInfo& info) { 
+AdminClient::AdminClient(const Napi::CallbackInfo& info): Connection(info) { Napi::Env env = info.Env(); if (!info.IsConstructCall()) { Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); - return env.Null(); + return; } if (info.Length() < 1) { Napi::Error::New(env, "You must supply a global configuration or a preexisting client").ThrowAsJavaScriptException(); - return env.Null(); // NOLINT + return; } Connection *connection = NULL; @@ -159,54 +149,28 @@ void AdminClient::New(const Napi::CallbackInfo& info) { if (info.Length() >= 3 && !info[2].IsNull() && !info[2].IsUndefined()) { if (!info[2].IsObject()) { Napi::Error::New(env, "Third argument, if provided, must be a client object").ThrowAsJavaScriptException(); - return env.Null(); // NOLINT + return; } // We check whether this is a wrapped object within the calling JavaScript // code, so it's safe to unwrap it here. We Unwrap it directly into a // Connection object, since it's OK to unwrap into the parent class. - connection = ObjectWrap::Unwrap( - info[2].ToObject(Napi::GetCurrentContext())); - client = new AdminClient(connection); + connection = ObjectWrap::Unwrap(info[2].ToObject()); + this->ConfigFromExisting(connection); } else { if (!info[0].IsObject()) { Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException(); - return env.Null(); + return; } std::string errstr; - gconfig = Conf::create( - RdKafka::Conf::CONF_GLOBAL, - (info[0].ToObject(Napi::GetCurrentContext())), errstr); + gconfig = Conf::create(RdKafka::Conf::CONF_GLOBAL, info[0].ToObject(), errstr); if (!gconfig) { Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); - return env.Null(); + return; } - client = new AdminClient(gconfig); + this->Config(gconfig, NULL); } - - // Wrap it - client->Wrap(info.This()); - - // Then there is some weird initialization that happens - // basically it sets the configuration data - // we don't need to do that because we lazy load it - - return info.This(); -} - -Napi::Object AdminClient::NewInstance(Napi::Value arg) { - Napi::Env env = arg.Env(); - Napi::EscapableHandleScope scope(env); - - const unsigned argc = 1; - - Napi::Value argv[argc] = { arg }; - Napi::Function cons = Napi::Function::New(env, constructor); - Napi::Object instance = - Napi::NewInstance(cons, argc, argv); - - return scope.Escape(instance); } /** @@ -996,40 +960,39 @@ void AdminClient::DeactivateDispatchers() { * C++ Exported prototype functions */ -Napi::Value AdminClient::NodeConnect(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeConnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); - AdminClient* client = ObjectWrap::Unwrap(info.This()); - // Activate the dispatchers before the connection, as some callbacks may run // on the background thread. // We will deactivate them if the connection fails. // Because the Admin Client connect is synchronous, we can do this within // AdminClient::Connect as well, but we do it here to keep the code similar // to the Producer and Consumer.
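The constructor hunk above is the template for the whole migration: NAN's static New() plus Wrap() collapses into a real C++ constructor on a Napi::ObjectWrap subclass, and every error path throws a JavaScript exception and returns void instead of returning env.Null(). A minimal self-contained sketch of that shape, with a hypothetical Example class standing in for the real ones:

#include <napi.h>
#include <string>

class Example : public Napi::ObjectWrap<Example> {
 public:
  static Napi::Object Init(Napi::Env env, Napi::Object exports) {
    // DefineClass + InstanceMethod replace the NAN prototype-template setup.
    Napi::Function cls = DefineClass(env, "Example", {
        InstanceMethod("name", &Example::Name),
    });
    exports.Set("Example", cls);
    return exports;
  }

  explicit Example(const Napi::CallbackInfo& info)
      : Napi::ObjectWrap<Example>(info) {
    Napi::Env env = info.Env();
    if (info.Length() < 1 || !info[0].IsString()) {
      // Constructors return void, so throw and bail out; there is no
      // env.Null() to return as there was in the NAN-style New().
      Napi::Error::New(env, "name string is required")
          .ThrowAsJavaScriptException();
      return;
    }
    m_name = info[0].As<Napi::String>().Utf8Value();
  }

 private:
  Napi::Value Name(const Napi::CallbackInfo& info) {
    return Napi::String::New(info.Env(), m_name);
  }

  std::string m_name;
};

NODE_API_MODULE(example, Example::Init)

This also explains why AdminClient gains ConfigFromExisting() and Config() setters in this patch: the object is already constructed by the time its arguments are inspected, so configuration is applied to this rather than chosen between two constructors.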
- client->ActivateDispatchers(); + this->ActivateDispatchers(); - Baton b = client->Connect(); + Baton b = this->Connect(); // Let the JS library throw if we need to so the error can be more rich int error_code = static_cast(b.err()); - return return Napi::Number::New(env, error_code); + return Napi::Number::New(env, error_code); } -Napi::Value AdminClient::NodeDisconnect(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeDisconnect(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); - AdminClient* client = ObjectWrap::Unwrap(info.This()); - - Baton b = client->Disconnect(); + Baton b = this->Disconnect(); // Let the JS library throw if we need to so the error can be more rich int error_code = static_cast(b.err()); - return return Napi::Number::New(env, error_code); + return Napi::Number::New(env, error_code); } /** * Create topic */ -Napi::Value AdminClient::NodeCreateTopic(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeCreateTopic(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3 || !info[2].IsFunction()) { @@ -1045,8 +1008,10 @@ Napi::Value AdminClient::NodeCreateTopic(const Napi::CallbackInfo& info) { // Create the final callback object Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - AdminClient* client = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + AdminClient* client = this; // Get the timeout int timeout = info[1].As().Int32Value(); @@ -1062,37 +1027,35 @@ Napi::Value AdminClient::NodeCreateTopic(const Napi::CallbackInfo& info) { } // Queue up dat work - Napi::AsyncQueueWorker( - new Workers::AdminClientCreateTopic(callback, client, topic, timeout)); - - return return env.Null(); + Napi::AsyncWorker* worker = new Workers::AdminClientCreateTopic(callback, client, topic, timeout); + worker->Queue(); + return env.Null(); } /** * Delete topic */ -Napi::Value AdminClient::NodeDeleteTopic(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeDeleteTopic(const Napi::CallbackInfo &info) { + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3 || !info[2].IsFunction()) { // Just throw an exception - Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); - return env.Null(); + return ThrowError(env, "Need to specify a callback"); } if (!info[1].IsNumber() || !info[0].IsString()) { - Napi::Error::New(env, "Must provide 'timeout', and 'topicName'").ThrowAsJavaScriptException(); - return env.Null(); + return ThrowError(env, "Must provide 'timeout', and 'topicName'"); } // Create the final callback object Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - AdminClient* client = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient* client = this; // Get the topic name from the string - std::string topic_name = Util::FromV8String( - info[0].To()); + std::string topic_name = Util::FromV8String(info[0].ToString()); // Get the timeout int timeout = info[1].As().Int32Value(); @@ -1102,16 +1065,16 @@ Napi::Value AdminClient::NodeDeleteTopic(const Napi::CallbackInfo& info) { topic_name.c_str()); // Queue up dat work - Napi::AsyncQueueWorker( - new Workers::AdminClientDeleteTopic(callback, client, topic, 
timeout)); - - return return env.Null(); + Napi::AsyncWorker* worker = new Workers::AdminClientDeleteTopic(callback, client, topic, timeout); + worker->Queue(); + return env.Null(); } /** * Delete topic */ -Napi::Value AdminClient::NodeCreatePartitions(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeCreatePartitions(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 4) { @@ -1127,14 +1090,16 @@ Napi::Value AdminClient::NodeCreatePartitions(const Napi::CallbackInfo& info) { } if (!info[2].IsNumber() || !info[1].IsNumber() || !info[0].IsString()) { - return Napi::ThrowError( + return ThrowError(env, "Must provide 'totalPartitions', 'timeout', and 'topicName'"); } // Create the final callback object Napi::Function cb = info[3].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - AdminClient* client = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + AdminClient* client = this; // Get the timeout int timeout = info[2].As().Int32Value(); @@ -1143,8 +1108,7 @@ Napi::Value AdminClient::NodeCreatePartitions(const Napi::CallbackInfo& info) { int partition_total_count = info[1].As().Int32Value(); // Get the topic name from the string - std::string topic_name = Util::FromV8String( - info[0].To()); + std::string topic_name = Util::FromV8String(info[0].ToString()); // Create an error buffer we can throw char* errbuf = reinterpret_cast(malloc(100)); @@ -1161,16 +1125,18 @@ Napi::Value AdminClient::NodeCreatePartitions(const Napi::CallbackInfo& info) { } // Queue up dat work - Napi::AsyncQueueWorker(new Workers::AdminClientCreatePartitions( - callback, client, new_partitions, timeout)); + Napi::AsyncWorker* worker = new Workers::AdminClientCreatePartitions( + callback, client, new_partitions, timeout); - return return env.Null(); + worker->Queue(); + return env.Null(); } /** * List Consumer Groups. */ -Napi::Value AdminClient::NodeListGroups(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeListGroups(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 2 || !info[1].IsFunction()) { @@ -1188,8 +1154,10 @@ Napi::Value AdminClient::NodeListGroups(const Napi::CallbackInfo& info) { // Create the final callback object Napi::Function cb = info[1].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + AdminClient *client = this; // Get the timeout - default 5000. int timeout_ms = GetParameter(config, "timeout", 5000); @@ -1198,51 +1166,50 @@ Napi::Value AdminClient::NodeListGroups(const Napi::CallbackInfo& info) { std::vector match_states; Napi::String match_consumer_group_states_key = Napi::String::New(env, "matchConsumerGroupStates"); - bool is_match_states_set = - (config).Has(match_consumer_group_states_key).FromMaybe(false); + bool is_match_states_set = config.Has(match_consumer_group_states_key); Napi::Array match_states_array = Napi::Array::New(env); if (is_match_states_set) { match_states_array = GetParameter( config, "matchConsumerGroupStates", match_states_array); - if (match_states_array->Length()) { + if (match_states_array.Length()) { match_states = Conversion::Admin::FromV8GroupStateArray( match_states_array); } } // Queue the work. 
- Napi::AsyncQueueWorker(new Workers::AdminClientListGroups( - callback, client, is_match_states_set, match_states, timeout_ms)); + Napi::AsyncWorker *worker = new Workers::AdminClientListGroups( + callback, client, is_match_states_set, match_states, timeout_ms); + worker->Queue(); + + return env.Null(); } /** * Describe Consumer Groups. */ -Napi::Value AdminClient::NodeDescribeGroups(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeDescribeGroups(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3 || !info[2].IsFunction()) { // Just throw an exception - Napi::Error::New(env, "Need to specify a callback").ThrowAsJavaScriptException(); - return env.Null(); + return ThrowError(env, "Need to specify a callback"); } if (!info[0].IsArray()) { - Napi::Error::New(env, "Must provide group name array").ThrowAsJavaScriptException(); - return env.Null(); + return ThrowError(env, "Must provide group name array"); } if (!info[1].IsObject()) { - Napi::Error::New(env, "Must provide options object").ThrowAsJavaScriptException(); - return env.Null(); + return ThrowError(env, "Must provide options object"); } // Get list of group names to describe. Napi::Array group_names = info[0].As(); - if (group_names->Length() == 0) { - Napi::Error::New(env, "Must provide at least one group name").ThrowAsJavaScriptException(); - return env.Null(); + if (group_names.Length() == 0) { + return ThrowError(env, "Must provide at least one group name"); } std::vector group_names_vector = v8ArrayToStringVector(group_names); @@ -1258,19 +1225,23 @@ Napi::Value AdminClient::NodeDescribeGroups(const Napi::CallbackInfo& info) { // Create the final callback object Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient *client = this; // Queue the work. - Napi::AsyncQueueWorker(new Workers::AdminClientDescribeGroups( + Napi::AsyncWorker *worker = new Workers::AdminClientDescribeGroups( callback, client, group_names_vector, include_authorized_operations, - timeout_ms)); + timeout_ms); + worker->Queue(); + return env.Null(); } /** * Delete Consumer Groups. */ -Napi::Value AdminClient::NodeDeleteGroups(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeDeleteGroups(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3 || !info[2].IsFunction()) { @@ -1292,7 +1263,7 @@ Napi::Value AdminClient::NodeDeleteGroups(const Napi::CallbackInfo& info) { // Get list of group names to delete, and convert it into an // rd_kafka_DeleteGroup_t array. Napi::Array group_names = info[0].As(); - if (group_names->Length() == 0) { + if (group_names.Length() == 0) { Napi::Error::New(env, "Must provide at least one group name").ThrowAsJavaScriptException(); return env.Null(); } @@ -1313,18 +1284,22 @@ Napi::Value AdminClient::NodeDeleteGroups(const Napi::CallbackInfo& info) { // Create the final callback object Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient *client = this; // Queue the work. 
- Napi::AsyncQueueWorker(new Workers::AdminClientDeleteGroups( - callback, client, group_list, group_names_vector.size(), timeout_ms)); + Napi::AsyncWorker *worker = new Workers::AdminClientDeleteGroups( + callback, client, group_list, group_names_vector.size(), timeout_ms); + worker->Queue(); + return env.Null(); } /** * List Consumer Group Offsets. */ -Napi::Value AdminClient::NodeListConsumerGroupOffsets(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeListConsumerGroupOffsets(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3 || !info[2].IsFunction()) { @@ -1339,7 +1314,7 @@ Napi::Value AdminClient::NodeListConsumerGroupOffsets(const Napi::CallbackInfo& Napi::Array listGroupOffsets = info[0].As(); - if (listGroupOffsets->Length() == 0) { + if (listGroupOffsets.Length() == 0) { Napi::Error::New(env, "'listGroupOffsets' cannot be empty").ThrowAsJavaScriptException(); return env.Null(); } @@ -1352,9 +1327,9 @@ Napi::Value AdminClient::NodeListConsumerGroupOffsets(const Napi::CallbackInfo& rd_kafka_ListConsumerGroupOffsets_t **requests = static_cast( malloc(sizeof(rd_kafka_ListConsumerGroupOffsets_t *) * - listGroupOffsets->Length())); + listGroupOffsets.Length())); - for (uint32_t i = 0; i < listGroupOffsets->Length(); ++i) { + for (uint32_t i = 0; i < listGroupOffsets.Length(); ++i) { Napi::Value listGroupOffsetValue = (listGroupOffsets).Get(i); if (!listGroupOffsetValue.IsObject()) { @@ -1365,34 +1340,29 @@ Napi::Value AdminClient::NodeListConsumerGroupOffsets(const Napi::CallbackInfo& listGroupOffsetValue.As(); Napi::Value groupIdValue; - if (!(listGroupOffsetObj).Get(Napi::String::New(env, "groupId")) - .ToLocal(&groupIdValue)) { + if (!(listGroupOffsetObj).Has(Napi::String::New(env, "groupId"))) { Napi::Error::New(env, "Each entry must have 'groupId'").ThrowAsJavaScriptException(); return env.Null(); + } else { + groupIdValue = listGroupOffsetObj.Get(Napi::String::New(env, "groupId")); } - Napi::MaybeLocal groupIdMaybe = - groupIdValue.To(); - if (groupIdMaybe.IsEmpty()) { - Napi::Error::New(env, "'groupId' must be a string").ThrowAsJavaScriptException(); - return env.Null(); - } - std::string groupIdUtf8 = groupIdMaybe.ToLocalChecked(.As()); - std::string groupIdStr = *groupIdUtf8; + std::string groupIdStr = groupIdValue.ToString().Utf8Value(); - Napi::Value partitionsValue; rd_kafka_topic_partition_list_t *partitions = NULL; - if ((listGroupOffsetObj).Get(Napi::String::New(env, "partitions")) - .ToLocal(&partitionsValue) && - partitionsValue->IsArray()) { + Napi::MaybeOrValue partitionsValue = + listGroupOffsetObj.Get(Napi::String::New(env, "partitions")); + + + if (partitionsValue.IsArray()) { Napi::Array partitionsArray = partitionsValue.As(); - if (partitionsArray->Length() > 0) { + if (partitionsArray.Length() > 0) { partitions = Conversion::TopicPartition:: TopicPartitionv8ArrayToTopicPartitionList(partitionsArray, false); if (partitions == NULL) { - return Napi::ThrowError( + return ThrowError(env, "Failed to convert partitions to list, provide proper object in " "partitions"); } @@ -1416,19 +1386,23 @@ Napi::Value AdminClient::NodeListConsumerGroupOffsets(const Napi::CallbackInfo& // Create the final callback object Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient *client = this; 
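Every Node* method above repeats the same plumbing: heap-allocate a Napi::FunctionReference, Reset() it to the JS callback so the function outlives the current stack frame, then build a worker and call worker->Queue() where NAN had Nan::AsyncQueueWorker(). For reference, node-addon-api's stock Napi::AsyncWorker can own the callback itself; a minimal sketch of that pattern (TimeoutWorker and NodeDoWork are illustrative names, not part of this patch):

#include <napi.h>

class TimeoutWorker : public Napi::AsyncWorker {
 public:
  TimeoutWorker(const Napi::Function& callback, int timeout_ms)
      : Napi::AsyncWorker(callback), m_timeout_ms(timeout_ms) {}

  void Execute() override {
    // Runs on a worker thread; no N-API calls are allowed here.
    if (m_timeout_ms < 0) {
      SetError("timeout must be non-negative");  // routes to OnError()
    }
  }

  void OnOK() override {
    // Back on the main thread: invoke the stored JS callback, node-style.
    Callback().Call({Env().Null(), Napi::Number::New(Env(), m_timeout_ms)});
  }

 private:
  int m_timeout_ms;
};

Napi::Value NodeDoWork(const Napi::CallbackInfo& info) {
  int timeout_ms = info[0].As<Napi::Number>().Int32Value();
  Napi::Function cb = info[1].As<Napi::Function>();
  (new TimeoutWorker(cb, timeout_ms))->Queue();  // worker frees itself
  return info.Env().Null();
}

The raw FunctionReference* handed around in these hunks presumably exists to fit the preexisting Workers hierarchy, which stores the reference rather than the function itself.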
// Queue the worker to process the offset fetch request asynchronously - Napi::AsyncQueueWorker(new Workers::AdminClientListConsumerGroupOffsets( - callback, client, requests, listGroupOffsets->Length(), - require_stable_offsets, timeout_ms)); + Napi::AsyncWorker *worker = new Workers::AdminClientListConsumerGroupOffsets( + callback, client, requests, listGroupOffsets.Length(), + require_stable_offsets, timeout_ms); + worker->Queue(); + return env.Null(); } /** * Delete Records. */ -Napi::Value AdminClient::NodeDeleteRecords(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeDeleteRecords(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3 || !info[2].IsFunction()) { @@ -1437,7 +1411,7 @@ Napi::Value AdminClient::NodeDeleteRecords(const Napi::CallbackInfo& info) { } if (!info[0].IsArray()) { - return Napi::ThrowError( + return ThrowError(env, "Must provide array containing 'TopicPartitionOffset' objects"); } @@ -1450,7 +1424,7 @@ Napi::Value AdminClient::NodeDeleteRecords(const Napi::CallbackInfo& info) { // Get the list of TopicPartitions to delete records from // and convert it into rd_kafka_DeleteRecords_t array Napi::Array delete_records_list = info[0].As(); - if (delete_records_list->Length() == 0) { + if (delete_records_list.Length() == 0) { Napi::Error::New(env, "Must provide at least one TopicPartitionOffset").ThrowAsJavaScriptException(); return env.Null(); } @@ -1468,7 +1442,7 @@ Napi::Value AdminClient::NodeDeleteRecords(const Napi::CallbackInfo& info) { Conversion::TopicPartition::TopicPartitionv8ArrayToTopicPartitionList( delete_records_list, true); if (partitions == NULL) { - return Napi::ThrowError( + return ThrowError(env, "Failed to convert objects in delete records list, provide proper " "TopicPartitionOffset objects"); } @@ -1485,18 +1459,24 @@ Napi::Value AdminClient::NodeDeleteRecords(const Napi::CallbackInfo& info) { // Create the final callback object Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + + AdminClient *client = this; // Queue the worker to process the offset fetch request asynchronously - Napi::AsyncQueueWorker(new Workers::AdminClientDeleteRecords( - callback, client, delete_records, 1, operation_timeout_ms, timeout_ms)); + Napi::AsyncWorker *worker = new Workers::AdminClientDeleteRecords( + callback, client, delete_records, 1, operation_timeout_ms, timeout_ms); + + worker->Queue(); + return env.Null(); } /** * Describe Topics.
*/ -Napi::Value AdminClient::NodeDescribeTopics(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeDescribeTopics(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3 || !info[2].IsFunction()) { @@ -1511,7 +1491,7 @@ Napi::Value AdminClient::NodeDescribeTopics(const Napi::CallbackInfo& info) { Napi::Array topicNames = info[0].As(); - if (topicNames->Length() == 0) { + if (topicNames.Length() == 0) { Napi::Error::New(env, "'topicNames' cannot be empty").ThrowAsJavaScriptException(); return env.Null(); } @@ -1543,19 +1523,24 @@ Napi::Value AdminClient::NodeDescribeTopics(const Napi::CallbackInfo& info) { int timeout_ms = GetParameter(options, "timeout", 5000); Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient *client = this; - Napi::AsyncQueueWorker(new Workers::AdminClientDescribeTopics( - callback, client, topic_collection, - include_authorised_operations, timeout_ms)); + Napi::AsyncWorker *worker = new Workers::AdminClientDescribeTopics( + callback, client, topic_collection, include_authorised_operations, + timeout_ms); + worker->Queue(); + + return env.Null(); } /** * List Offsets. */ -Napi::Value AdminClient::NodeListOffsets(const Napi::CallbackInfo& info) { +Napi::Value AdminClient::NodeListOffsets(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3 || !info[2].IsFunction()) { @@ -1590,12 +1575,15 @@ Napi::Value AdminClient::NodeListOffsets(const Napi::CallbackInfo& info) { // Create the final callback object Napi::Function cb = info[2].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); - AdminClient *client = ObjectWrap::Unwrap(info.This()); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); + AdminClient *client = this; // Queue the worker to process the offset fetch request asynchronously - Napi::AsyncQueueWorker(new Workers::AdminClientListOffsets( - callback, client, partitions, timeout_ms, isolation_level)); + Napi::AsyncWorker *worker = new Workers::AdminClientListOffsets( + callback, client, partitions, timeout_ms, isolation_level); + worker->Queue(); + return env.Null(); } } // namespace NodeKafka diff --git a/src/admin.h b/src/admin.h index 06825416..6f517ae6 100644 --- a/src/admin.h +++ b/src/admin.h @@ -39,9 +39,9 @@ namespace NodeKafka { class AdminClient : public Connection { public: - static void Init(Napi::Object); - static Napi::Object NewInstance(Napi::Value); + static void Init(const Napi::Env&, Napi::Object); + AdminClient(const Napi::CallbackInfo&); void ActivateDispatchers(); void DeactivateDispatchers(); @@ -76,12 +76,10 @@ class AdminClient : public Connection { rd_kafka_IsolationLevel_t isolation_level, rd_kafka_event_t** event_response); + void ConfigFromExisting(Connection* existing); protected: static Napi::FunctionReference constructor; - static void New(const Napi::CallbackInfo& info); - explicit AdminClient(Conf* globalConfig); - explicit AdminClient(Connection* existingConnection); ~AdminClient(); bool is_derived = false; @@ -89,21 +87,21 @@ class AdminClient : public Connection { private: // Node methods // static Napi::Value NodeValidateTopic(const Napi::CallbackInfo& info); - static Napi::Value NodeCreateTopic(const 
Napi::CallbackInfo& info); - static Napi::Value NodeDeleteTopic(const Napi::CallbackInfo& info); - static Napi::Value NodeCreatePartitions(const Napi::CallbackInfo& info); + Napi::Value NodeCreateTopic(const Napi::CallbackInfo& info); + Napi::Value NodeDeleteTopic(const Napi::CallbackInfo& info); + Napi::Value NodeCreatePartitions(const Napi::CallbackInfo& info); // Consumer group operations - static Napi::Value NodeListGroups(const Napi::CallbackInfo& info); - static Napi::Value NodeDescribeGroups(const Napi::CallbackInfo& info); - static Napi::Value NodeDeleteGroups(const Napi::CallbackInfo& info); - static Napi::Value NodeListConsumerGroupOffsets(const Napi::CallbackInfo& info); - static Napi::Value NodeDeleteRecords(const Napi::CallbackInfo& info); - static Napi::Value NodeDescribeTopics(const Napi::CallbackInfo& info); - static Napi::Value NodeListOffsets(const Napi::CallbackInfo& info); - - static Napi::Value NodeConnect(const Napi::CallbackInfo& info); - static Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); + Napi::Value NodeListGroups(const Napi::CallbackInfo& info); + Napi::Value NodeDescribeGroups(const Napi::CallbackInfo& info); + Napi::Value NodeDeleteGroups(const Napi::CallbackInfo& info); + Napi::Value NodeListConsumerGroupOffsets(const Napi::CallbackInfo& info); + Napi::Value NodeDeleteRecords(const Napi::CallbackInfo& info); + Napi::Value NodeDescribeTopics(const Napi::CallbackInfo& info); + Napi::Value NodeListOffsets(const Napi::CallbackInfo& info); + + Napi::Value NodeConnect(const Napi::CallbackInfo& info); + Napi::Value NodeDisconnect(const Napi::CallbackInfo& info); }; } // namespace NodeKafka diff --git a/src/binding.cc b/src/binding.cc index 7d7753ad..9431f4c2 100644 --- a/src/binding.cc +++ b/src/binding.cc @@ -67,9 +67,9 @@ void ConstantsInit(Napi::Env env, Napi::Object exports) { Napi::Object Init(Napi::Env env, Napi::Object exports) { KafkaConsumer::Init(env, exports); - Producer::Init(exports); - AdminClient::Init(exports); - Topic::Init(exports); + Producer::Init(env, exports); + AdminClient::Init(env, exports); + Topic::Init(env, exports); ConstantsInit(env, exports); (exports).Set(Napi::String::New(env, "librdkafkaVersion"), diff --git a/src/topic.h b/src/topic.h index f9bba3d6..60fa04ec 100644 --- a/src/topic.h +++ b/src/topic.h @@ -23,7 +23,7 @@ namespace NodeKafka { class Topic : public Napi::ObjectWrap { public: - static void Init(Napi::Object); + static void Init(const Napi::Env&, Napi::Object); static Napi::Object NewInstance(Napi::Value arg); template Baton toRDKafkaTopic(Connection *handle); From ecc9be52dc15ba8f8c10b5ae919bcc39f50c4035 Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Wed, 26 Mar 2025 15:56:46 -0500 Subject: [PATCH 11/14] Topic NAN -> NAPI --- src/topic.cc | 83 ++++++++++++++++++---------------------------------- src/topic.h | 17 +++++------ 2 files changed, 36 insertions(+), 64 deletions(-) diff --git a/src/topic.cc b/src/topic.cc index d26f02a2..450d5dd6 100644 --- a/src/topic.cc +++ b/src/topic.cc @@ -8,9 +8,7 @@ */ #include -#include -#include "src/common.h" #include "src/connection.h" #include "src/topic.h" @@ -29,10 +27,11 @@ namespace NodeKafka { * @sa NodeKafka::Connection */ -Topic::Topic(std::string topic_name, RdKafka::Conf* config): - m_topic_name(topic_name), - m_config(config) { +void Topic::Setup(std::string topic_name, RdKafka::Conf *config) { + m_topic_name = topic_name; + // We probably want to copy the config. 
May require refactoring if we do not + m_config = config; } Topic::~Topic() { @@ -45,7 +44,7 @@ std::string Topic::name() { return m_topic_name; } -Baton Topic::toRDKafkaTopic(Connection* handle) { +template Baton Topic::toRDKafkaTopic(Connection* handle) { if (m_config) { return handle->CreateTopic(m_topic_name, m_config); } else { @@ -76,38 +75,34 @@ Baton offset_store (int32_t partition, int64_t offset) { Napi::FunctionReference Topic::constructor; -void Topic::Init(Napi::Object exports) { +void Topic::Init(const Napi::Env &env, Napi::Object exports) { Napi::HandleScope scope(env); - Napi::FunctionReference tpl = Napi::Function::New(env, New); - tpl->SetClassName(Napi::String::New(env, "Topic")); - - - InstanceMethod("name", &NodeGetName), + Napi::Function Topic = DefineClass(env, "Topic", { + InstanceMethod("name", &Topic::NodeGetName), + }); // connect. disconnect. resume. pause. get meta data - constructor.Reset((tpl->GetFunction(Napi::GetCurrentContext())) - ); + constructor.Reset(Topic); - (exports).Set(Napi::String::New(env, "Topic"), - tpl->GetFunction(Napi::GetCurrentContext())); + exports.Set(Napi::String::New(env, "Topic"), Topic); } -void Topic::New(const Napi::CallbackInfo& info) { +Topic::Topic(const Napi::CallbackInfo &info): ObjectWrap(info) { Napi::Env env = info.Env(); if (!info.IsConstructCall()) { Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); - return env.Null(); + return; } if (info.Length() < 1) { Napi::Error::New(env, "topic name is required").ThrowAsJavaScriptException(); - return env.Null(); + return; } if (!info[0].IsString()) { Napi::Error::New(env, "Topic name must be a string").ThrowAsJavaScriptException(); - return env.Null(); + return; } RdKafka::Conf* config = NULL; @@ -119,61 +114,39 @@ void Topic::New(const Napi::CallbackInfo& info) { std::string errstr; if (!info[1].IsObject()) { Napi::Error::New(env, "Configuration data must be specified").ThrowAsJavaScriptException(); - return env.Null(); + return; } - config = Conf::create(RdKafka::Conf::CONF_TOPIC, (info[1].ToObject(Napi::GetCurrentContext())), errstr); // NOLINT + config = Conf::create(RdKafka::Conf::CONF_TOPIC, info[1].ToObject(), errstr); // NOLINT if (!config) { Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); - return env.Null(); + return; } } - std::string parameterValue = info[0].As(.To()); - std::string topic_name(*parameterValue); - - Topic* topic = new Topic(topic_name, config); + std::string parameterValue = info[0].ToString(); + std::string topic_name(parameterValue); - // Wrap it - topic->Wrap(info.This()); - - // Then there is some weird initialization that happens - // basically it sets the configuration data - // we don't need to do that because we lazy load it - - return info.This(); + this->Setup(topic_name, config); } -// handle - -Napi::Object Topic::NewInstance(Napi::Value arg) { - Napi::Env env = arg.Env(); - Napi::EscapableHandleScope scope(env); - - const unsigned argc = 1; - - Napi::Value argv[argc] = { arg }; - Napi::Function cons = Napi::Function::New(env, constructor); - Napi::Object instance = - Napi::NewInstance(cons, argc, argv); - - return scope.Escape(instance); -} - -Napi::Value Topic::NodeGetName(const Napi::CallbackInfo& info) { +Napi::Value Topic::NodeGetName(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); Napi::HandleScope scope(env); - Topic* topic = ObjectWrap::Unwrap(info.This()); + Topic* topic = this; - return Napi::New(env, topic->name()); + return Napi::String::From(env, 
this->name()); } -Napi::Value Topic::NodePartitionAvailable(const Napi::CallbackInfo& info) { +Napi::Value Topic::NodePartitionAvailable(const Napi::CallbackInfo &info) { + return info.Env().Null(); // @TODO(sparente) } Napi::Value Topic::NodeOffsetStore(const Napi::CallbackInfo& info) { + return info.Env().Null(); // @TODO(sparente) } diff --git a/src/topic.h b/src/topic.h index 60fa04ec..294a9a0d 100644 --- a/src/topic.h +++ b/src/topic.h @@ -23,16 +23,15 @@ namespace NodeKafka { class Topic : public Napi::ObjectWrap { public: - static void Init(const Napi::Env&, Napi::Object); - static Napi::Object NewInstance(Napi::Value arg); + static void Init(const Napi::Env &, Napi::Object); + Topic(const Napi::CallbackInfo& info); template Baton toRDKafkaTopic(Connection *handle); protected: - static Napi::FunctionReference constructor; - static void New(const Napi::CallbackInfo& info); + static Napi::FunctionReference constructor; - static Napi::Value NodeGetMetadata(const Napi::CallbackInfo& info); + Napi::Value NodeGetMetadata(const Napi::CallbackInfo& info); // TopicConfig * config_; @@ -40,15 +39,15 @@ class Topic : public Napi::ObjectWrap { std::string name(); private: - Topic(std::string, RdKafka::Conf *); + void Setup(std::string, RdKafka::Conf *); ~Topic(); std::string m_topic_name; RdKafka::Conf * m_config; - static Napi::Value NodeGetName(const Napi::CallbackInfo& info); - static Napi::Value NodePartitionAvailable(const Napi::CallbackInfo& info); - static Napi::Value NodeOffsetStore(const Napi::CallbackInfo& info); + Napi::Value NodeGetName(const Napi::CallbackInfo& info); + Napi::Value NodePartitionAvailable(const Napi::CallbackInfo& info); + Napi::Value NodeOffsetStore(const Napi::CallbackInfo& info); }; } // namespace NodeKafka From acd8c4ad6b4597c890e9ab4d88602f46aa7f031b Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Wed, 26 Mar 2025 16:20:35 -0500 Subject: [PATCH 12/14] Fixup connection --- src/connection.h | 117 +++++++++++++++++++++-------------------------- 1 file changed, 53 insertions(+), 64 deletions(-) diff --git a/src/connection.h b/src/connection.h index ba94c035..85bc4a12 100644 --- a/src/connection.h +++ b/src/connection.h @@ -24,6 +24,7 @@ #include "src/errors.h" #include "src/config.h" #include "src/callbacks.h" +#include "src/workers.h" namespace NodeKafka { @@ -427,8 +428,9 @@ template class Connection : public Napi::ObjectWrap { Napi::FunctionReference* callback = new Napi::FunctionReference(); *callback = Napi::Persistent(cb); - Napi::AsyncWorker::Queue(new Workers::ConnectionMetadata( - callback, obj, topic, timeout_ms, allTopics)); + Napi::AsyncWorker *worker = new Workers::ConnectionMetadata( + callback, obj, topic, timeout_ms, allTopics); + worker->Queue(); return env.Null(); } @@ -446,24 +448,16 @@ template class Connection : public Napi::ObjectWrap { std::vector toppars = Conversion::TopicPartition::FromV8Array(info[0].As()); - int timeout_ms; - Napi::Maybe maybeTimeout = - info[1].As(.As().Uint32Value()); - - if (maybeTimeout.IsNothing()) { - timeout_ms = 1000; - } else { - timeout_ms = static_cast(maybeTimeout); - } + int timeout_ms = info[1].As().Int32Value(); Napi::Function cb = info[2].As(); Napi::FunctionReference callback = Napi::Persistent(cb); Connection* handle = this; - Napi::AsyncQueueWorker( - new Workers::Handle::OffsetsForTimes(callback, handle, - toppars, timeout_ms)); + Napi::AsyncWorker *worker = new Workers::Handle::OffsetsForTimes( + callback, handle, toppars, timeout_ms); + worker->Queue(); return env.Null(); } @@ -472,12 +466,12 @@ 
template class Connection : public Napi::ObjectWrap { Napi::Env env = info.Env(); Napi::HandleScope scope(env); - Connection* obj = ObjectWrap::Unwrap(info.This()); + Connection* obj = this; if (!info[0].IsString()) { Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException(); ; - return; + return env.Null(); } if (!info[1].IsNumber()) { @@ -496,9 +490,9 @@ template class Connection : public Napi::ObjectWrap { } // Get string pointer for the topic name - std::string topicUTF8 = info[0].As(.To()); + std::string topicUTF8 = info[0].ToString().Utf8Value(); // The first parameter is the topic - std::string topic_name(*topicUTF8); + std::string topic_name(topicUTF8); // Second parameter is the partition int32_t partition = info[1].As().Int32Value(); @@ -508,10 +502,12 @@ template class Connection : public Napi::ObjectWrap { // Fourth parameter is the callback Napi::Function cb = info[3].As(); - Napi::FunctionReference *callback = new Napi::FunctionReference(cb); + Napi::FunctionReference *callback = new Napi::FunctionReference(); + callback->Reset(cb); - Napi::AsyncQueueWorker(new Workers::ConnectionQueryWatermarkOffsets( - callback, obj, topic_name, partition, timeout_ms)); + Napi::AsyncWorker *worker = new Workers::ConnectionQueryWatermarkOffsets( + callback, obj, topic_name, partition, timeout_ms); + worker->Queue(); return env.Null(); } @@ -529,21 +525,19 @@ template class Connection : public Napi::ObjectWrap { } // Get string pointer for the username - std::string usernameUTF8 = info[0].As(.To()); + std::string usernameUTF8 = info[0].As().Utf8Value(); // The first parameter is the username - std::string username(*usernameUTF8); + std::string username(usernameUTF8); // Get string pointer for the password - std::string passwordUTF8 = info[1].As(.To()); + std::string passwordUTF8 = info[1].As().Utf8Value(); // The first parameter is the password - std::string password(*passwordUTF8); + std::string password(passwordUTF8); - Connection* obj = ObjectWrap::Unwrap(info.This()); - Baton b = obj->SetSaslCredentials(username, password); + Baton b = this->SetSaslCredentials(username, password); if (b.err() != RdKafka::ERR_NO_ERROR) { - Napi::Value errorObject = b.ToObject(); - Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); + b.ToError(env).ThrowAsJavaScriptException(); return env.Null(); } @@ -563,16 +557,16 @@ template class Connection : public Napi::ObjectWrap { Napi::Error::New(env, "Need to specify a callbacks object").ThrowAsJavaScriptException(); return env.Null(); } - v8::Local context = Napi::GetCurrentContext(); - Connection* obj = ObjectWrap::Unwrap(info.This()); + + Connection* obj = this; + + const bool add = info[0].As().Value(); - const bool add = info[0].As().Value().ToChecked(); - Napi::Object configs_object = - info[1].ToObject(context); + Napi::Object configs_object = info[1].ToObject(); Napi::Array configs_property_names = - configs_object->GetOwnPropertyNames(context); + configs_object.GetPropertyNames(); - for (unsigned int j = 0; j < configs_property_names->Length(); ++j) { + for (unsigned int j = 0; j < configs_property_names.Length(); ++j) { std::string configs_string_key; Napi::Value configs_key = @@ -583,7 +577,7 @@ template class Connection : public Napi::ObjectWrap { int config_type = 0; if (configs_value.IsObject() && configs_key.IsString()) { std::string configs_utf8_key = configs_key.As(); - configs_string_key = std::string(*configs_utf8_key); + configs_string_key = std::string(configs_utf8_key); if 
(configs_string_key.compare("global") == 0) { config_type = 1; } else if (configs_string_key.compare("topic") == 0) { @@ -598,11 +592,11 @@ template class Connection : public Napi::ObjectWrap { } Napi::Object object = - configs_value->ToObject(context); + configs_value.ToObject(); Napi::Array property_names = - object->GetOwnPropertyNames(context); + object.GetPropertyNames(); - for (unsigned int i = 0; i < property_names->Length(); ++i) { + for (unsigned int i = 0; i < property_names.Length(); ++i) { std::string errstr; std::string string_key; @@ -611,12 +605,12 @@ template class Connection : public Napi::ObjectWrap { if (key.IsString()) { std::string utf8_key = key.As(); - string_key = std::string(*utf8_key); + string_key = std::string(utf8_key); } else { continue; } - if (value->IsFunction()) { + if (value.IsFunction()) { Napi::Function cb = value.As(); switch (config_type) { case 1: @@ -641,7 +635,7 @@ template class Connection : public Napi::ObjectWrap { } } - return env.True(); + return Napi::Boolean::From(env, true); } Napi::Value NodeSetOAuthBearerToken(const Napi::CallbackInfo &info) { @@ -661,44 +655,43 @@ template class Connection : public Napi::ObjectWrap { return env.Null(); } - if (!info[3].IsNullOrUndefined() && !info[3].IsArray()) { + if (!info[3].IsNull() && !info[3].IsUndefined() && !info[3].IsArray()) { Napi::Error::New(env, "4th parameter must be an extensions array or null").ThrowAsJavaScriptException(); return env.Null(); } // Get string pointer for the token - std::string tokenUtf8 = info[0].As(.To()); - std::string token(*tokenUtf8); + std::string tokenUtf8 = info[0].As().Utf8Value(); + std::string token(tokenUtf8); // Get the lifetime_ms int64_t lifetime_ms = info[1].As().Int64Value(); // Get string pointer for the principal_name std::string principal_nameUtf8 = - info[2].As(.To()); - std::string principal_name(*principal_nameUtf8); + info[2].As().Utf8Value(); + std::string principal_name(principal_nameUtf8); // Get the extensions (if any) std::list extensions; - if (!info[3].IsNullOrUndefined()) { + if (!info[3].IsNull() && !info[3].IsUndefined()) { Napi::Array extensionsArray = info[3].As(); extensions = v8ArrayToStringList(extensionsArray); } - Connection* obj = ObjectWrap::Unwrap(info.This()); + Connection* obj = this; Baton b = obj->SetOAuthBearerToken(token, lifetime_ms, principal_name, extensions); if (b.err() != RdKafka::ERR_NO_ERROR) { - Napi::Value errorObject = b.ToObject(); - Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); + b.ToError(env).ThrowAsJavaScriptException(); return env.Null(); } return env.Null(); } - static Napi::Value NodeSetOAuthBearerTokenFailure(const Napi::CallbackInfo &info) { + Napi::Value NodeSetOAuthBearerTokenFailure(const Napi::CallbackInfo &info) { Napi::Env env = info.Env(); if (!info[0].IsString()) { Napi::Error::New(env, "1st parameter must be an error string").ThrowAsJavaScriptException(); @@ -706,15 +699,13 @@ template class Connection : public Napi::ObjectWrap { } // Get string pointer for the error string - std::string errstrUtf8 = info[0].As(.To()); - std::string errstr(*errstrUtf8); + std::string errstrUtf8 = info[0].As().Utf8Value(); + std::string errstr(errstrUtf8); - Connection* obj = ObjectWrap::Unwrap(info.This()); - Baton b = obj->SetOAuthBearerTokenFailure(errstr); + Baton b = this->SetOAuthBearerTokenFailure(errstr); if (b.err() != RdKafka::ERR_NO_ERROR) { - Napi::Value errorObject = b.ToObject(); - Napi::Error::New(env, errorObject).ThrowAsJavaScriptException(); + 
b.ToError(env).ThrowAsJavaScriptException(); return env.Null(); } @@ -723,11 +714,9 @@ template class Connection : public Napi::ObjectWrap { Napi::Value NodeName(const Napi::CallbackInfo &info) { Napi::Env env = info.Env(); - Connection* obj = ObjectWrap::Unwrap(info.This()); - std::string name = obj->Name(); - return Napi::New(env, name); - } - + + return Napi::String::From(env, this->Name()); + } }; } // namespace NodeKafka From a329109eacc1b7f973f854051a0de064b401b373 Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Wed, 26 Mar 2025 16:31:49 -0500 Subject: [PATCH 13/14] Fix header file ordering --- src/binding.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/binding.h b/src/binding.h index b1a5a422..b685575a 100644 --- a/src/binding.h +++ b/src/binding.h @@ -17,6 +17,7 @@ #include "src/common.h" #include "src/errors.h" #include "src/config.h" +#include "src/workers.h" #include "src/connection.h" #include "src/kafka-consumer.h" #include "src/producer.h" From 76668f747755c87ac7e387e900c05925f3c00ec1 Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Wed, 26 Mar 2025 23:43:43 -0500 Subject: [PATCH 14/14] cleanup whitespace --- package.json | 4 +- src/admin.cc | 214 +++++++++---------- src/binding.cc | 6 +- src/callbacks.cc | 120 +++++------ src/callbacks.h | 22 +- src/common.cc | 467 +++++++++++++++++++++--------------------- src/connection.h | 60 +++--- src/kafka-consumer.cc | 92 ++++----- src/kafka-consumer.h | 2 +- src/producer.cc | 104 +++++----- src/topic.h | 2 +- src/workers.cc | 308 ++++++++++++++-------------- src/workers.h | 24 +-- 13 files changed, 712 insertions(+), 713 deletions(-) diff --git a/package.json b/package.json index a89fb0a1..99decd84 100644 --- a/package.json +++ b/package.json @@ -55,7 +55,7 @@ "dependencies": { "node-addon-api": "8.3.1", "@mapbox/node-pre-gyp": "^1.0.11", - "bindings": "^1.3.1", + "bindings": "^1.3.1" }, "engines": { "node": ">=18.0.0" @@ -65,4 +65,4 @@ "schemaregistry", "schemaregistry-examples" ] -} \ No newline at end of file +} diff --git a/src/admin.cc b/src/admin.cc index a3175183..80bf903d 100644 --- a/src/admin.cc +++ b/src/admin.cc @@ -43,8 +43,8 @@ Baton AdminClient::Connect() { * client, as it should always be connected. */ if (m_has_underlying) { return Baton(RdKafka::ERR__STATE, - "Existing client is not connected, and dependent client " - "cannot initiate connection."); + "Existing client is not connected, and dependent client " + "cannot initiate connection."); } Baton baton = setupSaslOAuthBearerConfig(); @@ -102,7 +102,7 @@ void AdminClient::Init(const Napi::Env& env, Napi::Object exports) { Napi::Function AdminClient = DefineClass(env, "AdminClient", { // Inherited from NodeKafka::Connection InstanceMethod("configureCallbacks", &AdminClient::NodeConfigureCallbacks), - InstanceMethod("name", &AdminClient::NodeName), + InstanceMethod("name", &AdminClient::NodeName), InstanceMethod("setOAuthBearerToken", &AdminClient::NodeSetOAuthBearerToken), StaticMethod("setOAuthBearerTokenFailure", &NodeSetOAuthBearerTokenFailure), @@ -125,7 +125,7 @@ void AdminClient::Init(const Napi::Env& env, Napi::Object exports) { InstanceMethod("setSaslCredentials", &AdminClient::NodeSetSaslCredentials), InstanceMethod("getMetadata", &AdminClient::NodeGetMetadata), }); - + constructor.Reset(AdminClient); exports.Set(Napi::String::New(env, "AdminClient"), AdminClient); } @@ -155,7 +155,7 @@ AdminClient::AdminClient(const Napi::CallbackInfo& info): Connection(info) { // code, so it's safe to unwrap it here. 
We Unwrap it directly into a // Connection object, since it's OK to unwrap into the parent class. connection = ObjectWrap::Unwrap(info[2].ToObject()); - this->ConfigFromExisting(connection); + this->ConfigFromExisting(connection); } else { if (!info[0].IsObject()) { Napi::Error::New(env, "Global configuration data must be specified").ThrowAsJavaScriptException(); @@ -282,14 +282,14 @@ Baton AdminClient::CreateTopic(rd_kafka_NewTopic_t* topic, int timeout_ms) { const char *errmsg = rd_kafka_topic_result_error_string(terr); if (errcode != RD_KAFKA_RESP_ERR_NO_ERROR) { - if (errmsg) { - const std::string errormsg = std::string(errmsg); - rd_kafka_event_destroy(event_response); - return Baton(static_cast(errcode), errormsg); // NOLINT - } else { - rd_kafka_event_destroy(event_response); - return Baton(static_cast(errcode)); - } + if (errmsg) { + const std::string errormsg = std::string(errmsg); + rd_kafka_event_destroy(event_response); + return Baton(static_cast(errcode), errormsg); // NOLINT + } else { + rd_kafka_event_destroy(event_response); + return Baton(static_cast(errcode)); + } } } @@ -360,8 +360,8 @@ Baton AdminClient::DeleteTopic(rd_kafka_DeleteTopic_t* topic, int timeout_ms) { const rd_kafka_resp_err_t errcode = rd_kafka_topic_result_error(terr); if (errcode != RD_KAFKA_RESP_ERR_NO_ERROR) { - rd_kafka_event_destroy(event_response); - return Baton(static_cast(errcode)); + rd_kafka_event_destroy(event_response); + return Baton(static_cast(errcode)); } } @@ -436,14 +436,14 @@ Baton AdminClient::CreatePartitions( const char *errmsg = rd_kafka_topic_result_error_string(terr); if (errcode != RD_KAFKA_RESP_ERR_NO_ERROR) { - if (errmsg) { - const std::string errormsg = std::string(errmsg); - rd_kafka_event_destroy(event_response); - return Baton(static_cast(errcode), errormsg); // NOLINT - } else { - rd_kafka_event_destroy(event_response); - return Baton(static_cast(errcode)); - } + if (errmsg) { + const std::string errormsg = std::string(errmsg); + rd_kafka_event_destroy(event_response); + return Baton(static_cast(errcode), errormsg); // NOLINT + } else { + rd_kafka_event_destroy(event_response); + return Baton(static_cast(errcode)); + } } } @@ -468,21 +468,21 @@ Baton AdminClient::ListGroups( // Make admin options to establish that we are listing groups rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } if (is_match_states_set) { rd_kafka_error_t *error = - rd_kafka_AdminOptions_set_match_consumer_group_states( - options, &match_states[0], match_states.size()); + rd_kafka_AdminOptions_set_match_consumer_group_states( + options, &match_states[0], match_states.size()); if (error) { - return Baton::BatonFromErrorAndDestroy(error); + return Baton::BatonFromErrorAndDestroy(error); } } @@ -495,7 +495,7 @@ Baton AdminClient::ListGroups( // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = PollForEvent( - rkqu, RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT, timeout_ms); + rkqu, RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT, timeout_ms); // Destroy the queue since we are done with it. 
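Several hunks here funnel a librdkafka rd_kafka_error_t into Baton::BatonFromErrorAndDestroy before returning. The Baton internals are not shown in this patch, but a plausible reading of that helper, sketched with a stand-in ErrorResult type, is that it copies out the code and message and then releases the error object, so no caller needs its own rd_kafka_error_destroy:

#include <string>
#include <librdkafka/rdkafka.h>

struct ErrorResult {  // hypothetical stand-in for the project's Baton
  rd_kafka_resp_err_t code;
  std::string message;
};

static ErrorResult FromErrorAndDestroy(rd_kafka_error_t* error) {
  ErrorResult result;
  result.code = rd_kafka_error_code(error);       // numeric error code
  result.message = rd_kafka_error_string(error);  // human-readable message
  rd_kafka_error_destroy(error);                  // we own the error object
  return result;
}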
rd_kafka_queue_destroy(rkqu); @@ -523,9 +523,9 @@ Baton AdminClient::ListGroups( } Baton AdminClient::DescribeGroups(std::vector &groups, - bool include_authorized_operations, - int timeout_ms, - /* out */ rd_kafka_event_t **event_response) { + bool include_authorized_operations, + int timeout_ms, + /* out */ rd_kafka_event_t **event_response) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); } @@ -538,21 +538,21 @@ Baton AdminClient::DescribeGroups(std::vector &groups, // Make admin options to establish that we are describing groups rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } if (include_authorized_operations) { rd_kafka_error_t *error = - rd_kafka_AdminOptions_set_include_authorized_operations( - options, include_authorized_operations); + rd_kafka_AdminOptions_set_include_authorized_operations( + options, include_authorized_operations); if (error) { - return Baton::BatonFromErrorAndDestroy(error); + return Baton::BatonFromErrorAndDestroy(error); } } @@ -566,13 +566,13 @@ Baton AdminClient::DescribeGroups(std::vector &groups, } rd_kafka_DescribeConsumerGroups(m_client->c_ptr(), &c_groups[0], - groups.size(), options, rkqu); + groups.size(), options, rkqu); // Poll for an event by type in that queue // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = PollForEvent( - rkqu, RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, timeout_ms); + rkqu, RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, timeout_ms); // Destroy the queue since we are done with it. rd_kafka_queue_destroy(rkqu); @@ -600,8 +600,8 @@ Baton AdminClient::DescribeGroups(std::vector &groups, } Baton AdminClient::DeleteGroups(rd_kafka_DeleteGroup_t **group_list, - size_t group_cnt, int timeout_ms, - /* out */ rd_kafka_event_t **event_response) { + size_t group_cnt, int timeout_ms, + /* out */ rd_kafka_event_t **event_response) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); } @@ -614,11 +614,11 @@ Baton AdminClient::DeleteGroups(rd_kafka_DeleteGroup_t **group_list, // Make admin options to establish that we are deleting groups rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DELETEGROUPS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DELETEGROUPS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } @@ -627,13 +627,13 @@ Baton AdminClient::DeleteGroups(rd_kafka_DeleteGroup_t **group_list, rd_kafka_queue_t *rkqu = rd_kafka_queue_new(m_client->c_ptr()); rd_kafka_DeleteGroups(m_client->c_ptr(), group_list, group_cnt, options, - rkqu); + rkqu); // Poll for an event by type in that queue // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = - PollForEvent(rkqu, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, timeout_ms); + PollForEvent(rkqu, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, timeout_ms); // Destroy the queue since we are done with it. 
rd_kafka_queue_destroy(rkqu); @@ -676,21 +676,21 @@ Baton AdminClient::ListConsumerGroupOffsets( // Make admin options to establish that we are fetching offsets rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } if (require_stable_offsets) { rd_kafka_error_t *error = - rd_kafka_AdminOptions_set_require_stable_offsets( - options, require_stable_offsets); + rd_kafka_AdminOptions_set_require_stable_offsets( + options, require_stable_offsets); if (error) { - return Baton::BatonFromErrorAndDestroy(error); + return Baton::BatonFromErrorAndDestroy(error); } } @@ -698,13 +698,13 @@ Baton AdminClient::ListConsumerGroupOffsets( rd_kafka_queue_t *rkqu = rd_kafka_queue_new(m_client->c_ptr()); rd_kafka_ListConsumerGroupOffsets(m_client->c_ptr(), req, req_cnt, options, - rkqu); + rkqu); // Poll for an event by type in that queue // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = PollForEvent( - rkqu, RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT, timeout_ms); + rkqu, RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT, timeout_ms); // Destroy the queue since we are done with it. rd_kafka_queue_destroy(rkqu); @@ -732,9 +732,9 @@ Baton AdminClient::ListConsumerGroupOffsets( } Baton AdminClient::DeleteRecords(rd_kafka_DeleteRecords_t **del_records, - size_t del_records_cnt, - int operation_timeout_ms, int timeout_ms, - rd_kafka_event_t **event_response) { + size_t del_records_cnt, + int operation_timeout_ms, int timeout_ms, + rd_kafka_event_t **event_response) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); } @@ -747,17 +747,17 @@ Baton AdminClient::DeleteRecords(rd_kafka_DeleteRecords_t **del_records, // Make admin options to establish that we are deleting records rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DELETERECORDS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DELETERECORDS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } err = rd_kafka_AdminOptions_set_operation_timeout( - options, operation_timeout_ms, errstr, sizeof(errstr)); + options, operation_timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } @@ -766,13 +766,13 @@ Baton AdminClient::DeleteRecords(rd_kafka_DeleteRecords_t **del_records, rd_kafka_queue_t *rkqu = rd_kafka_queue_new(m_client->c_ptr()); rd_kafka_DeleteRecords(m_client->c_ptr(), del_records, - del_records_cnt, options, rkqu); + del_records_cnt, options, rkqu); // Poll for an event by type in that queue // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = - PollForEvent(rkqu, RD_KAFKA_EVENT_DELETERECORDS_RESULT, timeout_ms); + PollForEvent(rkqu, RD_KAFKA_EVENT_DELETERECORDS_RESULT, timeout_ms); // Destroy the queue since we are done with it. 
rd_kafka_queue_destroy(rkqu); @@ -800,9 +800,9 @@ Baton AdminClient::DeleteRecords(rd_kafka_DeleteRecords_t **del_records, } Baton AdminClient::DescribeTopics(rd_kafka_TopicCollection_t *topics, - bool include_authorized_operations, - int timeout_ms, - rd_kafka_event_t **event_response) { + bool include_authorized_operations, + int timeout_ms, + rd_kafka_event_t **event_response) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); } @@ -815,20 +815,20 @@ Baton AdminClient::DescribeTopics(rd_kafka_TopicCollection_t *topics, // Make admin options to establish that we are describing topics rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DESCRIBETOPICS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DESCRIBETOPICS); if (include_authorized_operations) { rd_kafka_error_t *error = - rd_kafka_AdminOptions_set_include_authorized_operations( - options, include_authorized_operations); + rd_kafka_AdminOptions_set_include_authorized_operations( + options, include_authorized_operations); if (error) { - return Baton::BatonFromErrorAndDestroy(error); + return Baton::BatonFromErrorAndDestroy(error); } } char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } @@ -842,7 +842,7 @@ Baton AdminClient::DescribeTopics(rd_kafka_TopicCollection_t *topics, // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = - PollForEvent(rkqu, RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT, timeout_ms); + PollForEvent(rkqu, RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT, timeout_ms); // Destroy the queue since we are done with it. rd_kafka_queue_destroy(rkqu); @@ -871,9 +871,9 @@ Baton AdminClient::DescribeTopics(rd_kafka_TopicCollection_t *topics, Baton AdminClient::ListOffsets(rd_kafka_topic_partition_list_t *partitions, - int timeout_ms, - rd_kafka_IsolationLevel_t isolation_level, - rd_kafka_event_t **event_response) { + int timeout_ms, + rd_kafka_IsolationLevel_t isolation_level, + rd_kafka_event_t **event_response) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); } @@ -886,17 +886,17 @@ Baton AdminClient::ListOffsets(rd_kafka_topic_partition_list_t *partitions, // Make admin options to establish that we are fetching offsets rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( - m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTOFFSETS); + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTOFFSETS); char errstr[512]; rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( - options, timeout_ms, errstr, sizeof(errstr)); + options, timeout_ms, errstr, sizeof(errstr)); if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { return Baton(static_cast(err), errstr); } rd_kafka_error_t *error = - rd_kafka_AdminOptions_set_isolation_level(options, isolation_level); + rd_kafka_AdminOptions_set_isolation_level(options, isolation_level); if (error) { return Baton::BatonFromErrorAndDestroy(error); } @@ -910,7 +910,7 @@ Baton AdminClient::ListOffsets(rd_kafka_topic_partition_list_t *partitions, // DON'T destroy the event. It is the out parameter, and ownership is // the caller's. *event_response = - PollForEvent(rkqu, RD_KAFKA_EVENT_LISTOFFSETS_RESULT, timeout_ms); + PollForEvent(rkqu, RD_KAFKA_EVENT_LISTOFFSETS_RESULT, timeout_ms); // Destroy the queue since we are done with it. 
rd_kafka_queue_destroy(rkqu); @@ -961,7 +961,7 @@ void AdminClient::DeactivateDispatchers() { */ Napi::Value AdminClient::NodeConnect(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); // Activate the dispatchers before the connection, as some callbacks may run @@ -979,7 +979,7 @@ Napi::Value AdminClient::NodeConnect(const Napi::CallbackInfo &info) { } Napi::Value AdminClient::NodeDisconnect(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); Baton b = this->Disconnect(); @@ -1036,7 +1036,7 @@ Napi::Value AdminClient::NodeCreateTopic(const Napi::CallbackInfo &info) { * Delete topic */ Napi::Value AdminClient::NodeDeleteTopic(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 3 || !info[2].IsFunction()) { @@ -1098,7 +1098,7 @@ Napi::Value AdminClient::NodeCreatePartitions(const Napi::CallbackInfo &info) { Napi::Function cb = info[3].As(); Napi::FunctionReference *callback = new Napi::FunctionReference(); callback->Reset(cb); - + AdminClient* client = this; // Get the timeout @@ -1136,7 +1136,7 @@ Napi::Value AdminClient::NodeCreatePartitions(const Napi::CallbackInfo &info) { * List Consumer Groups. */ Napi::Value AdminClient::NodeListGroups(const Napi::CallbackInfo &info) { - Napi::Env env = info.Env(); + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 2 || !info[1].IsFunction()) { @@ -1156,7 +1156,7 @@ Napi::Value AdminClient::NodeListGroups(const Napi::CallbackInfo &info) { Napi::Function cb = info[1].As(); Napi::FunctionReference *callback = new Napi::FunctionReference(); callback->Reset(cb); - + AdminClient *client = this; // Get the timeout - default 5000. 
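
Each of the Node* handlers re-indented in this patch follows the same node-addon-api shape: validate the JavaScript arguments, retain the callback in a heap-allocated Napi::FunctionReference, queue a Napi::AsyncWorker, and return to JavaScript immediately. The following is a minimal sketch of that pattern, assuming node-addon-api 8.x; the names NodeDoOperation and ExampleWorker are illustrative only and are not identifiers from this patch series:

    #include <napi.h>

    // Illustrative worker: Execute() runs on a worker thread, OnOK() runs
    // back on the JS thread, where the stored callback is invoked.
    class ExampleWorker : public Napi::AsyncWorker {
     public:
      ExampleWorker(Napi::FunctionReference *callback, int timeout_ms)
          : Napi::AsyncWorker(callback->Env()),
            m_callback(callback), m_timeout_ms(timeout_ms) {}

      void Execute() override {
        // A blocking librdkafka call would go here, bounded by m_timeout_ms.
      }

      void OnOK() override {
        Napi::Env env = Env();
        m_callback->Call({env.Null()});  // callback(err = null)
        m_callback->Reset();
        delete m_callback;
      }

     private:
      Napi::FunctionReference *m_callback;
      int m_timeout_ms;
    };

    // Illustrative handler following the shape used by these bindings.
    Napi::Value NodeDoOperation(const Napi::CallbackInfo &info) {
      Napi::Env env = info.Env();
      Napi::HandleScope scope(env);

      if (info.Length() < 2 || !info[1].IsFunction()) {
        Napi::Error::New(env, "Need to specify a callback")
            .ThrowAsJavaScriptException();
        return env.Null();
      }

      // Keep the JS callback alive until the worker finishes; the worker
      // takes ownership of the reference and releases it when done.
      Napi::FunctionReference *callback = new Napi::FunctionReference();
      callback->Reset(info[1].As<Napi::Function>());

      Napi::AsyncWorker *worker = new ExampleWorker(callback, /*timeout_ms=*/5000);
      worker->Queue();

      return env.Null();
    }

This mirrors what NodeListGroups does with Workers::AdminClientListGroups in the hunks around this point.
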
@@ -1171,10 +1171,10 @@ Napi::Value AdminClient::NodeListGroups(const Napi::CallbackInfo &info) {
   if (is_match_states_set) {
     match_states_array = GetParameter(
-	config, "matchConsumerGroupStates", match_states_array);
+      config, "matchConsumerGroupStates", match_states_array);
     if (match_states_array.Length()) {
       match_states = Conversion::Admin::FromV8GroupStateArray(
-	  match_states_array);
+        match_states_array);
     }
   }

@@ -1182,7 +1182,7 @@ Napi::Value AdminClient::NodeListGroups(const Napi::CallbackInfo &info) {
   Napi::AsyncWorker *worker = new Workers::AdminClientListGroups(
       callback, client, is_match_states_set, match_states, timeout_ms);
   worker->Queue();
-  
+
   return env.Null();
 }

@@ -1326,18 +1326,18 @@ Napi::Value AdminClient::NodeListConsumerGroupOffsets(const Napi::CallbackInfo &
    */
   rd_kafka_ListConsumerGroupOffsets_t **requests =
       static_cast(
-	  malloc(sizeof(rd_kafka_ListConsumerGroupOffsets_t *) *
-		 listGroupOffsets.Length()));
+        malloc(sizeof(rd_kafka_ListConsumerGroupOffsets_t *) *
+               listGroupOffsets.Length()));

   for (uint32_t i = 0; i < listGroupOffsets.Length(); ++i) {
     Napi::Value listGroupOffsetValue =
-	(listGroupOffsets).Get(i);
+      (listGroupOffsets).Get(i);
     if (!listGroupOffsetValue.IsObject()) {
       Napi::Error::New(env, "Each entry must be an object").ThrowAsJavaScriptException();
       return env.Null();
     }
     Napi::Object listGroupOffsetObj =
-	listGroupOffsetValue.As();
+      listGroupOffsetValue.As();

     Napi::Value groupIdValue;
     if (!(listGroupOffsetObj).Has(Napi::String::New(env, "groupId"))) {
@@ -1352,25 +1352,25 @@ Napi::Value AdminClient::NodeListConsumerGroupOffsets(const Napi::CallbackInfo &
     rd_kafka_topic_partition_list_t *partitions = NULL;
     Napi::MaybeOrValue partitionsValue =
-	listGroupOffsetObj.Get(Napi::String::New(env, "partitions"));
+      listGroupOffsetObj.Get(Napi::String::New(env, "partitions"));
+
-
     if (partitionsValue.IsArray()) {
       Napi::Array partitionsArray = partitionsValue.As();
       if (partitionsArray.Length() > 0) {
-	partitions = Conversion::TopicPartition::
-	    TopicPartitionv8ArrayToTopicPartitionList(partitionsArray, false);
-	if (partitions == NULL) {
-	  return ThrowError(env,
-	      "Failed to convert partitions to list, provide proper object in "
-	      "partitions");
-	}
+        partitions = Conversion::TopicPartition::
+          TopicPartitionv8ArrayToTopicPartitionList(partitionsArray, false);
+        if (partitions == NULL) {
+          return ThrowError(env,
+            "Failed to convert partitions to list, provide proper object in "
+            "partitions");
+        }
       }
     }

     requests[i] =
-	rd_kafka_ListConsumerGroupOffsets_new(groupIdStr.c_str(), partitions);
+      rd_kafka_ListConsumerGroupOffsets_new(groupIdStr.c_str(), partitions);

     if (partitions != NULL) {
       rd_kafka_topic_partition_list_destroy(partitions);
@@ -1412,7 +1412,7 @@ Napi::Value AdminClient::NodeDeleteRecords(const Napi::CallbackInfo &info) {
   if (!info[0].IsArray()) {
     return ThrowError(env,
-	"Must provide array containing 'TopicPartitionOffset' objects");
+      "Must provide array containing 'TopicPartitionOffset' objects");
   }

   if (!info[1].IsObject()) {
@@ -1436,15 +1436,15 @@
    */
   rd_kafka_DeleteRecords_t **delete_records =
       static_cast(
-	  malloc(sizeof(rd_kafka_DeleteRecords_t *) * 1));
+        malloc(sizeof(rd_kafka_DeleteRecords_t *) * 1));

   rd_kafka_topic_partition_list_t *partitions =
       Conversion::TopicPartition::TopicPartitionv8ArrayToTopicPartitionList(
-	  delete_records_list, true);
+        delete_records_list, true);

   if (partitions == NULL) {
     return ThrowError(env,
-	"Failed to convert objects in delete 
records list, provide proper " - "TopicPartitionOffset objects"); + "Failed to convert objects in delete records list, provide proper " + "TopicPartitionOffset objects"); } delete_records[0] = rd_kafka_DeleteRecords_new(partitions); @@ -1531,7 +1531,7 @@ Napi::Value AdminClient::NodeDescribeTopics(const Napi::CallbackInfo &info) { callback, client, topic_collection, include_authorised_operations, timeout_ms); worker->Queue(); - + return env.Null(); } @@ -1568,8 +1568,8 @@ Napi::Value AdminClient::NodeListOffsets(const Napi::CallbackInfo &info) { rd_kafka_IsolationLevel_t isolation_level = static_cast(GetParameter( - options, "isolationLevel", - static_cast(RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED))); + options, "isolationLevel", + static_cast(RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED))); int timeout_ms = GetParameter(options, "timeout", 5000); diff --git a/src/binding.cc b/src/binding.cc index 9431f4c2..21d907c1 100644 --- a/src/binding.cc +++ b/src/binding.cc @@ -60,9 +60,9 @@ void ConstantsInit(Napi::Env env, Napi::Object exports) { (exports).Set(Napi::String::New(env, "topic"), topicConstants); - (exports).Set(Napi::String::New(env, "err2str"),Napi::Function::New(env, NodeRdKafkaErr2Str)); + (exports).Set(Napi::String::New(env, "err2str"),Napi::Function::New(env, NodeRdKafkaErr2Str)); - (exports).Set(Napi::String::New(env, "features"), Napi::Function::New(env, NodeRdKafkaBuildInFeatures)); + (exports).Set(Napi::String::New(env, "features"), Napi::Function::New(env, NodeRdKafkaBuildInFeatures)); } Napi::Object Init(Napi::Env env, Napi::Object exports) { @@ -73,7 +73,7 @@ Napi::Object Init(Napi::Env env, Napi::Object exports) { ConstantsInit(env, exports); (exports).Set(Napi::String::New(env, "librdkafkaVersion"), - Napi::String::New(env, RdKafka::version_str().c_str())); + Napi::String::New(env, RdKafka::version_str().c_str())); return exports; } diff --git a/src/callbacks.cc b/src/callbacks.cc index 23f727a8..41af5bca 100644 --- a/src/callbacks.cc +++ b/src/callbacks.cc @@ -41,7 +41,7 @@ Napi::Array TopicPartitionListToV8Array( if (tp.offset >= 0) { (tp_obj).Set(Napi::String::New(env, "offset"), - Napi::Number::New(env, tp.offset)); + Napi::Number::New(env, tp.offset)); } (tp_array).Set(i, tp_obj); @@ -84,7 +84,7 @@ void Dispatcher::AsyncHandleCloseCallback(uv_handle_t *handle) { void Dispatcher::Deactivate() { if (async) { uv_close(reinterpret_cast(async), - Dispatcher::AsyncHandleCloseCallback); + Dispatcher::AsyncHandleCloseCallback); async = NULL; } } @@ -206,53 +206,53 @@ void EventDispatcher::Flush() { switch (_events[i].type) { case RdKafka::Event::EVENT_ERROR: - argv[0] = Napi::String::New(env, "error"); - argv[1] = Napi::Error::New(env, _events[i].message.c_str()); + argv[0] = Napi::String::New(env, "error"); + argv[1] = Napi::Error::New(env, _events[i].message.c_str()); - // if (event->err() == RdKafka::ERR__ALL_BROKERS_DOWN). Stop running - // This may be better suited to the node side of things - break; + // if (event->err() == RdKafka::ERR__ALL_BROKERS_DOWN). 
Stop running + // This may be better suited to the node side of things + break; case RdKafka::Event::EVENT_STATS: - argv[0] = Napi::String::New(env, "stats"); + argv[0] = Napi::String::New(env, "stats"); - (jsobj).Set(Napi::String::New(env, "message"), - Napi::String::New(env, _events[i].message.c_str())); + (jsobj).Set(Napi::String::New(env, "message"), + Napi::String::New(env, _events[i].message.c_str())); - break; + break; case RdKafka::Event::EVENT_LOG: - argv[0] = Napi::String::New(env, "log"); - - (jsobj).Set(Napi::String::New(env, "severity"), - Napi::New(env, _events[i].severity)); - (jsobj).Set(Napi::String::New(env, "fac"), - Napi::New(env, _events[i].fac.c_str())); - (jsobj).Set(Napi::String::New(env, "message"), - Napi::New(env, _events[i].message.c_str())); - (jsobj).Set(Napi::String::New(env, "name"), - Napi::New(env, this->client_name.c_str())); - - break; + argv[0] = Napi::String::New(env, "log"); + + (jsobj).Set(Napi::String::New(env, "severity"), + Napi::New(env, _events[i].severity)); + (jsobj).Set(Napi::String::New(env, "fac"), + Napi::New(env, _events[i].fac.c_str())); + (jsobj).Set(Napi::String::New(env, "message"), + Napi::New(env, _events[i].message.c_str())); + (jsobj).Set(Napi::String::New(env, "name"), + Napi::New(env, this->client_name.c_str())); + + break; case RdKafka::Event::EVENT_THROTTLE: - argv[0] = Napi::String::New(env, "throttle"); + argv[0] = Napi::String::New(env, "throttle"); - (jsobj).Set(Napi::String::New(env, "message"), - Napi::New(env, _events[i].message.c_str())); + (jsobj).Set(Napi::String::New(env, "message"), + Napi::New(env, _events[i].message.c_str())); - (jsobj).Set(Napi::String::New(env, "throttleTime"), - Napi::New(env, _events[i].throttle_time)); - (jsobj).Set(Napi::String::New(env, "brokerName"), - Napi::New(env, _events[i].broker_name)); - (jsobj).Set(Napi::String::New(env, "brokerId"), - Napi::Number::New(env, _events[i].broker_id)); + (jsobj).Set(Napi::String::New(env, "throttleTime"), + Napi::New(env, _events[i].throttle_time)); + (jsobj).Set(Napi::String::New(env, "brokerName"), + Napi::New(env, _events[i].broker_name)); + (jsobj).Set(Napi::String::New(env, "brokerId"), + Napi::Number::New(env, _events[i].broker_id)); - break; + break; default: - argv[0] = Napi::String::New(env, "event"); + argv[0] = Napi::String::New(env, "event"); - (jsobj).Set(Napi::String::New(env, "message"), - Napi::New(env, events[i].message.c_str())); + (jsobj).Set(Napi::String::New(env, "message"), + Napi::New(env, events[i].message.c_str())); - break; + break; } if (_events[i].type != RdKafka::Event::EVENT_ERROR) { @@ -301,34 +301,34 @@ void DeliveryReportDispatcher::Flush() { const DeliveryReport& event = events_list[i]; if (event.is_error) { - // If it is an error we need the first argument to be set - argv[0] = Napi::New(env, event.error_code); + // If it is an error we need the first argument to be set + argv[0] = Napi::New(env, event.error_code); } else { - argv[0] = env.Null(); + argv[0] = env.Null(); } Napi::Object jsobj(Napi::Object::New(env)); (jsobj).Set(Napi::String::New(env, "topic"), - Napi::New(env, event.topic_name)); + Napi::New(env, event.topic_name)); (jsobj).Set(Napi::String::New(env, "partition"), - Napi::Number::New(env, event.partition)); + Napi::Number::New(env, event.partition)); (jsobj).Set(Napi::String::New(env, "offset"), - Napi::Number::New(env, event.offset)); + Napi::Number::New(env, event.offset)); if (event.key) { - Napi::MaybeLocal buff = Napi::Buffer::New(env, - static_cast(event.key), - static_cast(event.key_len)); + 
Napi::MaybeLocal buff = Napi::Buffer::New(env, + static_cast(event.key), + static_cast(event.key_len)); (jsobj).Set(Napi::String::New(env, "key"), - buff); + buff); } else { (jsobj).Set(Napi::String::New(env, "key"), env.Null()); } if (event.opaque) { Napi::Persistent * persistent = - static_cast *>(event.opaque); + static_cast *>(event.opaque); Napi::Value object = Napi::New(env, *persistent); (jsobj).Set(Napi::String::New(env, "opaque"), object); @@ -341,25 +341,25 @@ void DeliveryReportDispatcher::Flush() { if (event.timestamp > -1) { (jsobj).Set(Napi::String::New(env, "timestamp"), - Napi::Number::New(env, event.timestamp)); + Napi::Number::New(env, event.timestamp)); } if (event.m_include_payload) { if (event.payload) { - Napi::MaybeLocal buff = Napi::Buffer::New(env, - static_cast(event.payload), - static_cast(event.len)); + Napi::MaybeLocal buff = Napi::Buffer::New(env, + static_cast(event.payload), + static_cast(event.len)); - (jsobj).Set(Napi::String::New(env, "value"), - buff); + (jsobj).Set(Napi::String::New(env, "value"), + buff); } else { - (jsobj).Set(Napi::String::New(env, "value"), - env.Null()); + (jsobj).Set(Napi::String::New(env, "value"), + env.Null()); } } (jsobj).Set(Napi::String::New(env, "size"), - Napi::Number::New(env, event.len)); + Napi::Number::New(env, event.len)); argv[1] = jsobj; @@ -588,9 +588,9 @@ Partitioner::Partitioner() {} Partitioner::~Partitioner() {} int32_t Partitioner::partitioner_cb(const RdKafka::Topic *topic, - const std::string *key, - int32_t partition_cnt, - void *msg_opaque) { + const std::string *key, + int32_t partition_cnt, + void *msg_opaque) { // Send this and get the callback and parse the int if (callback.IsEmpty()) { // default behavior diff --git a/src/callbacks.h b/src/callbacks.h index b8f46a18..4235668b 100644 --- a/src/callbacks.h +++ b/src/callbacks.h @@ -47,7 +47,7 @@ class Dispatcher { private: inline static void func(uv_async_t *async) { Dispatcher *dispatcher = - static_cast(async->data); + static_cast(async->data); dispatcher->Flush(); } static void AsyncHandleCloseCallback(uv_handle_t *); @@ -166,18 +166,18 @@ struct rebalance_event_t { std::vector partitions; rebalance_event_t(RdKafka::ErrorCode p_err, - std::vector p_partitions): - err(p_err) { + std::vector p_partitions): + err(p_err) { // Iterate over the topic partitions because we won't have them later for (size_t topic_partition_i = 0; topic_partition_i < p_partitions.size(); topic_partition_i++) { RdKafka::TopicPartition* topic_partition = - p_partitions[topic_partition_i]; + p_partitions[topic_partition_i]; event_topic_partition_t tp( - topic_partition->topic(), - topic_partition->partition(), - topic_partition->offset()); + topic_partition->topic(), + topic_partition->partition(), + topic_partition->offset()); partitions.push_back(tp); } @@ -195,13 +195,13 @@ struct offset_commit_event_t { for (size_t topic_partition_i = 0; topic_partition_i < p_partitions.size(); topic_partition_i++) { RdKafka::TopicPartition* topic_partition = - p_partitions[topic_partition_i]; + p_partitions[topic_partition_i]; // Just reuse this thing because it's the same exact thing we need event_topic_partition_t tp( - topic_partition->topic(), - topic_partition->partition(), - topic_partition->offset()); + topic_partition->topic(), + topic_partition->partition(), + topic_partition->offset()); partitions.push_back(tp); } diff --git a/src/common.cc b/src/common.cc index f1f88b15..8a321def 100644 --- a/src/common.cc +++ b/src/common.cc @@ -10,7 +10,6 @@ #include "src/common.h" #include 
-#include #include #include @@ -21,10 +20,10 @@ void Log(std::string str) { } template -T GetParameter(Napi::Object object, std::string field_name, T def) { - Napi::String field = Napi::New(env, field_name.c_str()); - if ((object).Has(field).FromMaybe(false)) { - Napi::Maybe maybeT = Napi::To((object).Get(field)); +T GetParameter(Napi::Env& env, Napi::Object object, std::string field_name, T def) { + Napi::String field = Napi::String::New(env, field_name.c_str()); + if (object.Has(field)) { + Napi::MaybeOrValue maybeT = object.Get(field); if (maybeT.IsNothing()) { return def; } else { @@ -84,8 +83,8 @@ int GetParameter(Napi::Object object, template<> std::string GetParameter(Napi::Object object, - std::string field_name, - std::string def) { + std::string field_name, + std::string def) { Napi::String field = Napi::New(env, field_name.c_str()); if ((object).Has(field).FromMaybe(false)) { Napi::Value parameter = @@ -94,13 +93,13 @@ std::string GetParameter(Napi::Object object, if (!parameter->IsUndefined() && !parameter->IsNull()) { Napi::String val = parameter.To() - ; + ; if (!val->IsUndefined() && !val->IsNull()) { - std::string parameterValue = val.As(); - std::string parameterString(*parameterValue); + std::string parameterValue = val.As(); + std::string parameterString(*parameterValue); - return parameterString; + return parameterString; } } } @@ -130,11 +129,11 @@ std::vector v8ArrayToStringVector(Napi::Array parameter) { for (unsigned int i = 0; i < parameter->Length(); i++) { Napi::Value v; if (!(parameter).Get(i).ToLocal(&v)) { - continue; + continue; } Napi::MaybeLocal p = v.To(); if (p.IsEmpty()) { - continue; + continue; } std::string pVal = p.ToLocalChecked(.As()); std::string pString(*pVal); @@ -150,11 +149,11 @@ std::list v8ArrayToStringList(Napi::Array parameter) { for (unsigned int i = 0; i < parameter->Length(); i++) { Napi::Value v; if (!(parameter).Get(i).ToLocal(&v)) { - continue; + continue; } Napi::MaybeLocal p = v.To(); if (p.IsEmpty()) { - continue; + continue; } std::string pVal = p.ToLocalChecked(.As()); std::string pString(*pVal); @@ -191,27 +190,27 @@ std::vector ToStringVector(Napi::Array parameter) { for (unsigned int i = 0; i < parameter->Length(); i++) { Napi::Value element; if (!(parameter).Get(i).ToLocal(&element)) { - continue; + continue; } if (!element->IsRegExp()) { - Napi::MaybeLocal p = element.To(); + Napi::MaybeLocal p = element.To(); - if (p.IsEmpty()) { - continue; - } + if (p.IsEmpty()) { + continue; + } - std::string pVal = p.ToLocalChecked(.As()); - std::string pString(*pVal); + std::string pVal = p.ToLocalChecked(.As()); + std::string pString(*pVal); - newItem.push_back(pString); + newItem.push_back(pString); } else { - std::string pVal = element.As(.As()->GetSource()); - std::string pString(*pVal); + std::string pVal = element.As(.As()->GetSource()); + std::string pString(*pVal); - Log(pString); + Log(pString); - newItem.push_back(pString); + newItem.push_back(pString); } } } @@ -235,12 +234,12 @@ Napi::Array ToV8Array(std::vector parameter) { * objects. 
*/ Napi::Array ToV8Array(const rd_kafka_error_t** error_list, - size_t error_cnt) { + size_t error_cnt) { Napi::Array errors = Napi::Array::New(env); for (size_t i = 0; i < error_cnt; i++) { RdKafka::ErrorCode code = - static_cast(rd_kafka_error_code(error_list[i])); + static_cast(rd_kafka_error_code(error_list[i])); std::string msg = std::string(rd_kafka_error_string(error_list[i])); (errors).Set(i, RdKafkaError(code, msg)); } @@ -263,16 +262,16 @@ Napi::Object ToV8Object(const rd_kafka_Node_t* node) { Napi::Object obj = Napi::Object::New(env); (obj).Set(Napi::String::New(env, "id"), - Napi::Number::New(env, rd_kafka_Node_id(node))); + Napi::Number::New(env, rd_kafka_Node_id(node))); (obj).Set(Napi::String::New(env, "host"), - Napi::String::New(env, rd_kafka_Node_host(node))); + Napi::String::New(env, rd_kafka_Node_host(node))); (obj).Set(Napi::String::New(env, "port"), - Napi::Number::New(env, rd_kafka_Node_port(node))); + Napi::Number::New(env, rd_kafka_Node_port(node))); const char* rack = rd_kafka_Node_rack(node); if (rack) { (obj).Set(Napi::String::New(env, "rack"), - Napi::String::New(env, rack)); + Napi::String::New(env, rack)); } return obj; @@ -284,19 +283,19 @@ Napi::Object ToV8Object(const rd_kafka_Node_t* node) { Napi::Object UuidToV8Object(const rd_kafka_Uuid_t* uuid) { /*Return object type { - mostSignificantBits: bigint - leastSignificantBits: bigint - base64: string + mostSignificantBits: bigint + leastSignificantBits: bigint + base64: string } */ Napi::Object obj = Napi::Object::New(env); (obj).Set(Napi::String::New(env, "mostSignificantBits"), - v8::BigInt::New(v8::Isolate::GetCurrent(), - rd_kafka_Uuid_most_significant_bits(uuid))); + v8::BigInt::New(v8::Isolate::GetCurrent(), + rd_kafka_Uuid_most_significant_bits(uuid))); (obj).Set(Napi::String::New(env, "leastSignificantBits"), - v8::BigInt::New(v8::Isolate::GetCurrent(), - rd_kafka_Uuid_least_significant_bits(uuid))); + v8::BigInt::New(v8::Isolate::GetCurrent(), + rd_kafka_Uuid_least_significant_bits(uuid))); ( obj).Set(Napi::String::New(env, "base64"), Napi::String::New(env, rd_kafka_Uuid_base64str(uuid))); @@ -345,38 +344,38 @@ Napi::Array ToV8Array( // an error field to TopicPartition? Or create a TopicPartitionError? if (topic_partition->err() != RdKafka::ErrorCode::ERR_NO_ERROR) { (array).Set(topic_partition_i, - Napi::Error::New(env, Napi::New(env, RdKafka::err2str(topic_partition->err())) - )); + Napi::Error::New(env, Napi::New(env, RdKafka::err2str(topic_partition->err())) + )); } else { // We have the list now let's get the properties from it Napi::Object obj = Napi::Object::New(env); if (topic_partition->offset() != RdKafka::Topic::OFFSET_INVALID) { - (obj).Set(Napi::String::New(env, "offset"), - Napi::Number::New(env, topic_partition->offset())); + (obj).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, topic_partition->offset())); } // If present, size >= 1, since it will include at least the // null terminator. if (topic_partition->get_metadata().size() > 0) { - (obj).Set(Napi::String::New(env, "metadata"), - Napi::String::New(env, - reinterpret_cast(topic_partition->get_metadata().data()), // NOLINT - // null terminator is not required by the constructor. - topic_partition->get_metadata().size() - 1) - ); + (obj).Set(Napi::String::New(env, "metadata"), + Napi::String::New(env, + reinterpret_cast(topic_partition->get_metadata().data()), // NOLINT + // null terminator is not required by the constructor. 
+ topic_partition->get_metadata().size() - 1) + ); } (obj).Set(Napi::String::New(env, "partition"), - Napi::Number::New(env, topic_partition->partition())); + Napi::Number::New(env, topic_partition->partition())); (obj).Set(Napi::String::New(env, "topic"), - Napi::String::New(env, topic_partition->topic().c_str()) - ); + Napi::String::New(env, topic_partition->topic().c_str()) + ); int leader_epoch = topic_partition->get_leader_epoch(); if (leader_epoch >= 0) { - (obj).Set(Napi::String::New(env, "leaderEpoch"), - Napi::Number::New(env, leader_epoch)); + (obj).Set(Napi::String::New(env, "leaderEpoch"), + Napi::Number::New(env, leader_epoch)); } (array).Set(topic_partition_i, obj); @@ -406,30 +405,30 @@ Napi::Array ToTopicPartitionV8Array( for (int topic_partition_i = 0; topic_partition_i < topic_partition_list->cnt; topic_partition_i++) { rd_kafka_topic_partition_t topic_partition = - topic_partition_list->elems[topic_partition_i]; + topic_partition_list->elems[topic_partition_i]; Napi::Object obj = Napi::Object::New(env); (obj).Set(Napi::String::New(env, "partition"), - Napi::Number::New(env, topic_partition.partition)); + Napi::Number::New(env, topic_partition.partition)); (obj).Set(Napi::String::New(env, "topic"), - Napi::String::New(env, topic_partition.topic)); + Napi::String::New(env, topic_partition.topic)); if (topic_partition.err != RD_KAFKA_RESP_ERR_NO_ERROR) { Napi::Object error = NodeKafka::RdKafkaError( - static_cast(topic_partition.err)); + static_cast(topic_partition.err)); (obj).Set(Napi::String::New(env, "error"), error); } if (include_offset) { (obj).Set(Napi::String::New(env, "offset"), - Napi::Number::New(env, topic_partition.offset)); + Napi::Number::New(env, topic_partition.offset)); } int leader_epoch = - rd_kafka_topic_partition_get_leader_epoch(&topic_partition); + rd_kafka_topic_partition_get_leader_epoch(&topic_partition); if (leader_epoch >= 0) { (obj).Set(Napi::String::New(env, "leaderEpoch"), - Napi::Number::New(env, leader_epoch)); + Napi::Number::New(env, leader_epoch)); } (array).Set(topic_partition_i, obj); @@ -453,13 +452,13 @@ std::vector FromV8Array( topic_partition_i < topic_partition_list->Length(); topic_partition_i++) { Napi::Value topic_partition_value; if (!(topic_partition_list).Get(topic_partition_i) - .ToLocal(&topic_partition_value)) { + .ToLocal(&topic_partition_value)) { continue; } if (topic_partition_value.IsObject()) { array.push_back(FromV8Object( - topic_partition_value.To())); + topic_partition_value.To())); } } @@ -493,7 +492,7 @@ rd_kafka_topic_partition_list_t* TopicPartitionv8ArrayToTopicPartitionList( int partition = GetParameter(item, "partition", -1); rd_kafka_topic_partition_t* toppar = - rd_kafka_topic_partition_list_add(newList, topic.c_str(), partition); + rd_kafka_topic_partition_list_add(newList, topic.c_str(), partition); if (include_offset) { int64_t offset = GetParameter(item, "offset", 0); @@ -506,7 +505,7 @@ rd_kafka_topic_partition_list_t* TopicPartitionv8ArrayToTopicPartitionList( /** * @brief v8 Array of Topic Partitions with offsetspec to * rd_kafka_topic_partition_list_t - * + * * @note Converts a v8 array of type [{topic: string, partition: number, * offset: {timestamp: number}}] to a rd_kafka_topic_partition_list_t */ @@ -532,10 +531,10 @@ TopicPartitionOffsetSpecv8ArrayToTopicPartitionList( int partition = GetParameter(item, "partition", -1); rd_kafka_topic_partition_t* toppar = - rd_kafka_topic_partition_list_add(newList, topic.c_str(), partition); + rd_kafka_topic_partition_list_add(newList, topic.c_str(), 
partition); Napi::Value offsetValue = - (item).Get(Napi::String::New(env, "offset")); + (item).Get(Napi::String::New(env, "offset")); Napi::Object offsetObject = offsetValue.As(); int64_t offset = GetParameter(offsetObject, "timestamp", 0); @@ -567,15 +566,15 @@ return NULL; Napi::String metadataKey = Napi::String::New(env, "metadata"); if ((topic_partition).Has(metadataKey).FromMaybe(false)) { Napi::Value metadataValue = - (topic_partition).Get(metadataKey); + (topic_partition).Get(metadataKey); if (metadataValue.IsString()) { std::string metadataValueUtf8Str = metadataValue.As(.As()); std::string metadataValueStr(*metadataValueUtf8Str); std::vector metadataVector(metadataValueStr.begin(), - metadataValueStr.end()); + metadataValueStr.end()); metadataVector.push_back( - '\0'); // The null terminator is not included in the iterator. + '\0'); // The null terminator is not included in the iterator. toppar->set_metadata(metadataVector); } } @@ -585,7 +584,7 @@ return NULL; Napi::String::New(env, "leaderEpoch"); if ((topic_partition).Has(leaderEpochKey).FromMaybe(false)) { Napi::Value leaderEpochValue = - (topic_partition).Get(leaderEpochKey); + (topic_partition).Get(leaderEpochKey); if (leaderEpochValue.IsNumber()) { int32_t leaderEpoch = leaderEpochValue.As().Int32Value(); @@ -662,9 +661,9 @@ Napi::Object ToV8Object(RdKafka::Metadata* metadata) { Napi::Object current_partition = Napi::Object::New(env); (current_partition).Set(Napi::String::New(env, "id"), - Napi::Number::New(env, xx->id())); + Napi::Number::New(env, xx->id())); (current_partition).Set(Napi::String::New(env, "leader"), - Napi::Number::New(env, xx->leader())); + Napi::Number::New(env, xx->leader())); const std::vector * replicas = xx->replicas(); const std::vector * isrs = xx->isrs(); @@ -678,19 +677,19 @@ Napi::Object ToV8Object(RdKafka::Metadata* metadata) { Napi::Array current_replicas = Napi::Array::New(env); for (r_it = replicas->begin(); r_it != replicas->end(); ++r_it, r_i++) { - (current_replicas).Set(r_i, Napi::Int32::New(env, *r_it)); + (current_replicas).Set(r_i, Napi::Int32::New(env, *r_it)); } Napi::Array current_isrs = Napi::Array::New(env); for (i_it = isrs->begin(); i_it != isrs->end(); ++i_it, i_i++) { - (current_isrs).Set(i_i, Napi::Int32::New(env, *i_it)); + (current_isrs).Set(i_i, Napi::Int32::New(env, *i_it)); } (current_partition).Set(Napi::String::New(env, "replicas"), - current_replicas); + current_replicas); (current_partition).Set(Napi::String::New(env, "isrs"), - current_isrs); + current_isrs); (current_topic_partitions).Set(partition_i, current_partition); } // iterate over partitions @@ -723,8 +722,8 @@ Napi::Object ToV8Object(RdKafka::Message *message) { } Napi::Object ToV8Object(RdKafka::Message *message, - bool include_payload, - bool include_headers) { + bool include_payload, + bool include_headers) { if (message->err() == RdKafka::ERR_NO_ERROR) { Napi::Object pack = Napi::Object::New(env); @@ -732,13 +731,13 @@ Napi::Object ToV8Object(RdKafka::Message *message, if (!include_payload) { (pack).Set(Napi::String::New(env, "value"), - env.Undefined()); + env.Undefined()); } else if (message_payload) { (pack).Set(Napi::String::New(env, "value"), - Napi::Encode(message_payload, message->len(), Napi::Encoding::BUFFER)); + Napi::Encode(message_payload, message->len(), Napi::Encoding::BUFFER)); } else { (pack).Set(Napi::String::New(env, "value"), - env.Null()); + env.Null()); } RdKafka::Headers* headers; @@ -747,13 +746,13 @@ Napi::Object ToV8Object(RdKafka::Message *message, int index = 0; std::vector 
all = headers->get_all(); for (std::vector::iterator it = all.begin(); - it != all.end(); it++) { - Napi::Object v8header = Napi::Object::New(env); - (v8header).Set(Napi::String::New(env, it->key()), - Napi::Encode(it->value_string(), - it->value_size(), Napi::Encoding::BUFFER)); - (v8headers).Set(index, v8header); - index++; + it != all.end(); it++) { + Napi::Object v8header = Napi::Object::New(env); + (v8header).Set(Napi::String::New(env, it->key()), + Napi::Encode(it->value_string(), + it->value_size(), Napi::Encoding::BUFFER)); + (v8headers).Set(index, v8header); + index++; } (pack).Set(Napi::String::New(env, "headers"), v8headers); } @@ -767,10 +766,10 @@ Napi::Object ToV8Object(RdKafka::Message *message, // We want this to also be a buffer to avoid corruption // https://github.com/confluentinc/confluent-kafka-javascript/issues/208 (pack).Set(Napi::String::New(env, "key"), - Napi::Encode(key_payload, message->key_len(), Napi::Encoding::BUFFER)); + Napi::Encode(key_payload, message->key_len(), Napi::Encoding::BUFFER)); } else { (pack).Set(Napi::String::New(env, "key"), - env.Null()); + env.Null()); } (pack).Set(Napi::String::New(env, "topic"), @@ -785,7 +784,7 @@ Napi::Object ToV8Object(RdKafka::Message *message, int32_t leader_epoch = message->leader_epoch(); if (leader_epoch >= 0) { (pack).Set(Napi::String::New(env, "leaderEpoch"), - Napi::Number::New(env, leader_epoch)); + Napi::Number::New(env, leader_epoch)); } return pack; @@ -841,32 +840,32 @@ rd_kafka_NewTopic_t* FromV8TopicObject( if (!config_keys.IsEmpty()) { Napi::Array field_array = config_keys; for (size_t i = 0; i < field_array->Length(); i++) { - Napi::String config_key = (field_array).Get(i) - .As(); - Napi::Value config_value = (config).Get(config_key) - ; - - // If the config value is a string... - if (config_value.IsString()) { - std::string pKeyVal = config_key.As(); - std::string pKeyString(*pKeyVal); - - std::string pValueVal = config_value.As(.As()); - std::string pValString(*pValueVal); - - err = rd_kafka_NewTopic_set_config( - new_topic, pKeyString.c_str(), pValString.c_str()); - - if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { - errstr = rd_kafka_err2str(err); - rd_kafka_NewTopic_destroy(new_topic); - return NULL; - } - } else { - errstr = "Config values must all be provided as strings."; - rd_kafka_NewTopic_destroy(new_topic); - return NULL; - } + Napi::String config_key = (field_array).Get(i) + .As(); + Napi::Value config_value = (config).Get(config_key) + ; + + // If the config value is a string... 
+ if (config_value.IsString()) { + std::string pKeyVal = config_key.As(); + std::string pKeyString(*pKeyVal); + + std::string pValueVal = config_value.As(.As()); + std::string pValString(*pValueVal); + + err = rd_kafka_NewTopic_set_config( + new_topic, pKeyString.c_str(), pValString.c_str()); + + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { + errstr = rd_kafka_err2str(err); + rd_kafka_NewTopic_destroy(new_topic); + return NULL; + } + } else { + errstr = "Config values must all be provided as strings."; + rd_kafka_NewTopic_destroy(new_topic); + return NULL; + } } } } @@ -890,18 +889,18 @@ std::vector FromV8GroupStateArray( for (unsigned int i = 0; i < parameter->Length(); i++) { Napi::Value v; if (!(parameter).Get(i).ToLocal(&v)) { - continue; + continue; } Napi::Maybe maybeT = v.As().Int64Value(); if (maybeT.IsNothing()) { - continue; + continue; } int64_t state_number = maybeT; if (state_number >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT) { - continue; + continue; } returnVec.push_back( - static_cast(state_number)); + static_cast(state_number)); } } return returnVec; @@ -915,10 +914,10 @@ Napi::Object FromListConsumerGroupsResult( /* Return object type: { groups: { - groupId: string, - protocolType: string, - isSimpleConsumerGroup: boolean, - state: ConsumerGroupState (internally a number) + groupId: string, + protocolType: string, + isSimpleConsumerGroup: boolean, + state: ConsumerGroupState (internally a number) }[], errors: LibrdKafkaError[] } @@ -929,7 +928,7 @@ Napi::Object FromListConsumerGroupsResult( const rd_kafka_error_t** error_list = rd_kafka_ListConsumerGroups_result_errors(result, &error_cnt); (returnObject).Set(Napi::String::New(env, "errors"), - Conversion::Util::ToV8Array(error_list, error_cnt)); + Conversion::Util::ToV8Array(error_list, error_cnt)); Napi::Array groups = Napi::Array::New(env); size_t groups_cnt; @@ -941,20 +940,20 @@ Napi::Object FromListConsumerGroupsResult( Napi::Object groupObject = Napi::Object::New(env); (groupObject).Set(Napi::String::New(env, "groupId"), - Napi::String::New(env, rd_kafka_ConsumerGroupListing_group_id(group)) - ); + Napi::String::New(env, rd_kafka_ConsumerGroupListing_group_id(group)) + ); bool is_simple = - rd_kafka_ConsumerGroupListing_is_simple_consumer_group(group); + rd_kafka_ConsumerGroupListing_is_simple_consumer_group(group); (groupObject).Set(Napi::String::New(env, "isSimpleConsumerGroup"), - Napi::Boolean::New(env, is_simple)); + Napi::Boolean::New(env, is_simple)); std::string protocol_type = is_simple ? 
"simple" : "consumer"; (groupObject).Set(Napi::String::New(env, "protocolType"), - Napi::String::New(env, protocol_type)); + Napi::String::New(env, protocol_type)); (groupObject).Set(Napi::String::New(env, "state"), - Napi::Number::New(env, rd_kafka_ConsumerGroupListing_state(group))); + Napi::Number::New(env, rd_kafka_ConsumerGroupListing_state(group))); (groups).Set(i, groupObject); } @@ -970,48 +969,48 @@ Napi::Object FromMemberDescription( const rd_kafka_MemberDescription_t* member) { /* Return object type: { - clientHost: string - clientId: string - memberId: string - memberAssignment: Buffer // will be always null - memberMetadata: Buffer // will be always null - groupInstanceId: string - assignment: { - topicPartitions: TopicPartition[] - }, + clientHost: string + clientId: string + memberId: string + memberAssignment: Buffer // will be always null + memberMetadata: Buffer // will be always null + groupInstanceId: string + assignment: { + topicPartitions: TopicPartition[] + }, } */ Napi::Object returnObject = Napi::Object::New(env); // clientHost (returnObject).Set(Napi::String::New(env, "clientHost"), - Napi::String::New(env, rd_kafka_MemberDescription_host(member)) - ); + Napi::String::New(env, rd_kafka_MemberDescription_host(member)) + ); // clientId (returnObject).Set(Napi::String::New(env, "clientId"), - Napi::String::New(env, rd_kafka_MemberDescription_client_id(member)) - ); + Napi::String::New(env, rd_kafka_MemberDescription_client_id(member)) + ); // memberId (returnObject).Set(Napi::String::New(env, "memberId"), - Napi::String::New(env, rd_kafka_MemberDescription_consumer_id(member)) - ); + Napi::String::New(env, rd_kafka_MemberDescription_consumer_id(member)) + ); // memberAssignment - not passed to user, always null (returnObject).Set(Napi::String::New(env, "memberAssignment"), - env.Null()); + env.Null()); // memberMetadata - not passed to user, always null (returnObject).Set(Napi::String::New(env, "memberMetadata"), - env.Null()); + env.Null()); // groupInstanceId const char* group_instance_id = rd_kafka_MemberDescription_group_instance_id(member); if (group_instance_id) { (returnObject).Set(Napi::String::New(env, "groupInstanceId"), - Napi::String::New(env, group_instance_id)); + Napi::String::New(env, group_instance_id)); } // assignment @@ -1023,9 +1022,9 @@ Napi::Object FromMemberDescription( Conversion::TopicPartition::ToTopicPartitionV8Array(partitions, false); Napi::Object assignmentObject = Napi::Object::New(env); (assignmentObject).Set(Napi::String::New(env, "topicPartitions"), - topicPartitions); + topicPartitions); (returnObject).Set(Napi::String::New(env, "assignment"), - assignmentObject); + assignmentObject); return returnObject; } @@ -1055,16 +1054,16 @@ Napi::Object FromConsumerGroupDescription( ( returnObject).Set(Napi::String::New(env, "groupId"), Napi::String::New(env, rd_kafka_ConsumerGroupDescription_group_id(desc)) - ); + ); // error const rd_kafka_error_t* error = rd_kafka_ConsumerGroupDescription_error(desc); if (error) { RdKafka::ErrorCode code = - static_cast(rd_kafka_error_code(error)); + static_cast(rd_kafka_error_code(error)); std::string msg = std::string(rd_kafka_error_string(error)); (returnObject).Set(Napi::String::New(env, "error"), - RdKafkaError(code, msg)); + RdKafkaError(code, msg)); } // members @@ -1072,7 +1071,7 @@ Napi::Object FromConsumerGroupDescription( size_t member_cnt = rd_kafka_ConsumerGroupDescription_member_count(desc); for (size_t i = 0; i < member_cnt; i++) { const rd_kafka_MemberDescription_t* member = - 
rd_kafka_ConsumerGroupDescription_member(desc, i); + rd_kafka_ConsumerGroupDescription_member(desc, i); (members).Set(i, FromMemberDescription(member)); } (returnObject).Set(Napi::String::New(env, "members"), members); @@ -1081,48 +1080,48 @@ Napi::Object FromConsumerGroupDescription( bool is_simple = rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(desc); (returnObject).Set(Napi::String::New(env, "isSimpleConsumerGroup"), - Napi::Boolean::New(env, is_simple)); + Napi::Boolean::New(env, is_simple)); // protocolType std::string protocolType = is_simple ? "simple" : "consumer"; (returnObject).Set(Napi::String::New(env, "protocolType"), - Napi::String::New(env, protocolType)); + Napi::String::New(env, protocolType)); // protocol (returnObject).Set(Napi::String::New(env, "protocol"), - Napi::String::New(env, - rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) - ); + Napi::String::New(env, + rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) + ); // partitionAssignor (returnObject).Set(Napi::String::New(env, "partitionAssignor"), - Napi::String::New(env, - rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) - ); + Napi::String::New(env, + rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) + ); // state (returnObject).Set(Napi::String::New(env, "state"), - Napi::Number::New(env, rd_kafka_ConsumerGroupDescription_state(desc))); + Napi::Number::New(env, rd_kafka_ConsumerGroupDescription_state(desc))); // coordinator const rd_kafka_Node_t* coordinator = rd_kafka_ConsumerGroupDescription_coordinator(desc); if (coordinator) { Napi::Object coordinatorObject = - Conversion::Util::ToV8Object(coordinator); + Conversion::Util::ToV8Object(coordinator); (returnObject).Set(Napi::String::New(env, "coordinator"), - coordinatorObject); + coordinatorObject); } // authorizedOperations size_t authorized_operations_cnt; const rd_kafka_AclOperation_t* authorized_operations = rd_kafka_ConsumerGroupDescription_authorized_operations( - desc, &authorized_operations_cnt); + desc, &authorized_operations_cnt); if (authorized_operations) { (returnObject).Set(Napi::String::New(env, "authorizedOperations"), - Conversion::Util::ToV8Array(authorized_operations, - authorized_operations_cnt)); + Conversion::Util::ToV8Array(authorized_operations, + authorized_operations_cnt)); } return returnObject; @@ -1173,22 +1172,22 @@ Napi::Array FromDeleteGroupsResult( Napi::Object group_object = Napi::Object::New(env); (group_object).Set(Napi::String::New(env, "groupId"), - Napi::String::New(env, rd_kafka_group_result_name(group_result)) - ); + Napi::String::New(env, rd_kafka_group_result_name(group_result)) + ); const rd_kafka_error_t* error = rd_kafka_group_result_error(group_result); if (!error) { (group_object).Set(Napi::String::New(env, "errorCode"), - Napi::Number::New(env, RD_KAFKA_RESP_ERR_NO_ERROR)); + Napi::Number::New(env, RD_KAFKA_RESP_ERR_NO_ERROR)); } else { RdKafka::ErrorCode code = - static_cast(rd_kafka_error_code(error)); + static_cast(rd_kafka_error_code(error)); const char* msg = rd_kafka_error_string(error); (group_object).Set(Napi::String::New(env, "errorCode"), - Napi::Number::New(env, code)); + Napi::Number::New(env, code)); (group_object).Set(Napi::String::New(env, "error"), - RdKafkaError(code, msg)); + RdKafkaError(code, msg)); } (returnArray).Set(i, group_object); } @@ -1197,7 +1196,7 @@ Napi::Array FromDeleteGroupsResult( } /** - * @brief Converts a rd_kafka_ListConsumerGroupOffsets_result_t* + * @brief Converts a rd_kafka_ListConsumerGroupOffsets_result_t* * into a 
v8 Array. */ Napi::Array FromListConsumerGroupOffsetsResult( @@ -1234,22 +1233,22 @@ Napi::Array FromListConsumerGroupOffsetsResult( // Set groupId std::string groupId = rd_kafka_group_result_name(group_result); (group_object).Set(Napi::String::New(env, "groupId"), - Napi::String::New(env, groupId.c_str())); + Napi::String::New(env, groupId.c_str())); // Set group-level error (if any) const rd_kafka_error_t* group_error = - rd_kafka_group_result_error(group_result); + rd_kafka_group_result_error(group_result); if (group_error) { RdKafka::ErrorCode code = - static_cast(rd_kafka_error_code(group_error)); + static_cast(rd_kafka_error_code(group_error)); const char* msg = rd_kafka_error_string(group_error); (group_object).Set(Napi::String::New(env, "error"), - RdKafkaError(code, msg)); + RdKafkaError(code, msg)); } // Get the list of partitions for this group const rd_kafka_topic_partition_list_t* partitionList = - rd_kafka_group_result_partitions(group_result); + rd_kafka_group_result_partitions(group_result); // Prepare array for TopicPartitionOffset[] Napi::Array partitionsArray = Napi::Array::New(env); @@ -1263,44 +1262,44 @@ Napi::Array FromListConsumerGroupOffsetsResult( // Set topic, partition, and offset (partition_object).Set(Napi::String::New(env, "topic"), - Napi::String::New(env, partition->topic)); + Napi::String::New(env, partition->topic)); (partition_object).Set(Napi::String::New(env, "partition"), - Napi::Number::New(env, partition->partition)); + Napi::Number::New(env, partition->partition)); (partition_object).Set(Napi::String::New(env, "offset"), - Napi::Number::New(env, partition->offset)); + Napi::Number::New(env, partition->offset)); // Set metadata (if available) if (partition->metadata != nullptr) { - ( - partition_object).Set(Napi::String::New(env, "metadata"), - Napi::String::New(env, static_cast(partition->metadata)) - ); + ( + partition_object).Set(Napi::String::New(env, "metadata"), + Napi::String::New(env, static_cast(partition->metadata)) + ); } else { - (partition_object).Set(Napi::String::New(env, "metadata"), - env.Null()); + (partition_object).Set(Napi::String::New(env, "metadata"), + env.Null()); } // Set leaderEpoch (if available) int32_t leader_epoch = - rd_kafka_topic_partition_get_leader_epoch(partition); + rd_kafka_topic_partition_get_leader_epoch(partition); if (leader_epoch >= 0) { - (partition_object).Set(Napi::String::New(env, "leaderEpoch"), - Napi::Number::New(env, leader_epoch)); + (partition_object).Set(Napi::String::New(env, "leaderEpoch"), + Napi::Number::New(env, leader_epoch)); } // Set partition-level error (if any) if (partition->err != RD_KAFKA_RESP_ERR_NO_ERROR) { - RdKafka::ErrorCode code = - static_cast(partition->err); - (group_object).Set(Napi::String::New(env, "error"), - RdKafkaError(code, rd_kafka_err2str(partition->err))); + RdKafka::ErrorCode code = + static_cast(partition->err); + (group_object).Set(Napi::String::New(env, "error"), + RdKafkaError(code, rd_kafka_err2str(partition->err))); } (partitionsArray).Set(partitionIndex++, partition_object); } (group_object).Set(Napi::String::New(env, "partitions"), - partitionsArray); + partitionsArray); (returnArray).Set(i, group_object); } @@ -1335,16 +1334,16 @@ Napi::Array FromDeleteRecordsResult( // Set topic, partition, and offset and error(if required) (partition_object).Set(Napi::String::New(env, "topic"), - Napi::String::New(env, partition->topic)); + Napi::String::New(env, partition->topic)); (partition_object).Set(Napi::String::New(env, "partition"), - Napi::Number::New(env, 
partition->partition)); + Napi::Number::New(env, partition->partition)); (partition_object).Set(Napi::String::New(env, "lowWatermark"), - Napi::Number::New(env, partition->offset)); + Napi::Number::New(env, partition->offset)); if (partition->err != RD_KAFKA_RESP_ERR_NO_ERROR) { RdKafka::ErrorCode code = static_cast(partition->err); (partition_object).Set(Napi::String::New(env, "error"), - RdKafkaError(code, rd_kafka_err2str(partition->err))); + RdKafkaError(code, rd_kafka_err2str(partition->err))); } (partitionsArray).Set(partitionIndex++, partition_object); @@ -1396,74 +1395,74 @@ Napi::Array FromDescribeTopicsResult( const char* topic_name = rd_kafka_TopicDescription_name(results[i]); (topic_object).Set(Napi::String::New(env, "name"), - Napi::String::New(env, topic_name)); + Napi::String::New(env, topic_name)); const rd_kafka_Uuid_t* topic_id = - rd_kafka_TopicDescription_topic_id(results[i]); + rd_kafka_TopicDescription_topic_id(results[i]); (topic_object).Set(Napi::String::New(env, "topicId"), - Conversion::Util::UuidToV8Object(topic_id)); + Conversion::Util::UuidToV8Object(topic_id)); int is_internal = rd_kafka_TopicDescription_is_internal(results[i]); (topic_object).Set(Napi::String::New(env, "isInternal"), - Napi::Boolean::New(env, is_internal)); + Napi::Boolean::New(env, is_internal)); const rd_kafka_error_t* error = rd_kafka_TopicDescription_error(results[i]); if (error) { RdKafka::ErrorCode code = - static_cast(rd_kafka_error_code(error)); + static_cast(rd_kafka_error_code(error)); (topic_object).Set(Napi::String::New(env, "error"), - RdKafkaError(code, rd_kafka_error_string(error))); + RdKafkaError(code, rd_kafka_error_string(error))); } size_t authorized_operations_cnt; const rd_kafka_AclOperation_t* authorized_operations = - rd_kafka_TopicDescription_authorized_operations( - results[i], &authorized_operations_cnt); + rd_kafka_TopicDescription_authorized_operations( + results[i], &authorized_operations_cnt); if (authorized_operations) { (topic_object).Set(Napi::String::New(env, "authorizedOperations"), - Conversion::Util::ToV8Array(authorized_operations, - authorized_operations_cnt)); + Conversion::Util::ToV8Array(authorized_operations, + authorized_operations_cnt)); } size_t partition_cnt; const rd_kafka_TopicPartitionInfo_t** partitions = - rd_kafka_TopicDescription_partitions(results[i], &partition_cnt); + rd_kafka_TopicDescription_partitions(results[i], &partition_cnt); Napi::Array partitionsArray = Napi::Array::New(env); for (size_t j = 0; j < partition_cnt; j++) { Napi::Object partition_object = Napi::Object::New(env); const rd_kafka_TopicPartitionInfo_t* partition = partitions[j]; (partition_object).Set(Napi::String::New(env, "partition"), - Napi::Number::New(env, - rd_kafka_TopicPartitionInfo_partition(partition))); + Napi::Number::New(env, + rd_kafka_TopicPartitionInfo_partition(partition))); const rd_kafka_Node_t* leader = - rd_kafka_TopicPartitionInfo_leader(partition); + rd_kafka_TopicPartitionInfo_leader(partition); (partition_object).Set(Napi::String::New(env, "leader"), - Conversion::Util::ToV8Object(leader)); + Conversion::Util::ToV8Object(leader)); size_t isr_cnt; const rd_kafka_Node_t** isr = - rd_kafka_TopicPartitionInfo_isr(partition, &isr_cnt); + rd_kafka_TopicPartitionInfo_isr(partition, &isr_cnt); Napi::Array isrArray = Napi::Array::New(env); for (size_t k = 0; k < isr_cnt; k++) { - (isrArray).Set(k, Conversion::Util::ToV8Object(isr[k])); + (isrArray).Set(k, Conversion::Util::ToV8Object(isr[k])); } (partition_object).Set(Napi::String::New(env, "isr"), 
isrArray); size_t replicas_cnt; const rd_kafka_Node_t** replicas = - rd_kafka_TopicPartitionInfo_replicas(partition, &replicas_cnt); + rd_kafka_TopicPartitionInfo_replicas(partition, &replicas_cnt); Napi::Array replicasArray = Napi::Array::New(env); for (size_t k = 0; k < replicas_cnt; k++) { - (replicasArray).Set(k, Conversion::Util::ToV8Object(replicas[k])); + (replicasArray).Set(k, Conversion::Util::ToV8Object(replicas[k])); } (partition_object).Set(Napi::String::New(env, "replicas"), - replicasArray); + replicasArray); (partitionsArray).Set(j, partition_object); } (topic_object).Set(Napi::String::New(env, "partitions"), - partitionsArray); + partitionsArray); (returnArray).Set(topicIndex++, topic_object); } @@ -1495,7 +1494,7 @@ Napi::Array FromListOffsetsResult( for (i = 0; i < result_cnt; i++) { const rd_kafka_topic_partition_t* partition = - rd_kafka_ListOffsetsResultInfo_topic_partition(results[i]); + rd_kafka_ListOffsetsResultInfo_topic_partition(results[i]); int64_t timestamp = rd_kafka_ListOffsetsResultInfo_timestamp(results[i]); // Create the ListOffsetsResult object @@ -1503,25 +1502,25 @@ Napi::Array FromListOffsetsResult( // Set topic, partition, offset, error and timestamp (partition_object).Set(Napi::String::New(env, "topic"), - Napi::String::New(env, partition->topic)); + Napi::String::New(env, partition->topic)); (partition_object).Set(Napi::String::New(env, "partition"), - Napi::Number::New(env, partition->partition)); + Napi::Number::New(env, partition->partition)); (partition_object).Set(Napi::String::New(env, "offset"), - Napi::Number::New(env, partition->offset)); + Napi::Number::New(env, partition->offset)); if (partition->err != RD_KAFKA_RESP_ERR_NO_ERROR) { RdKafka::ErrorCode code = static_cast(partition->err); (partition_object).Set(Napi::String::New(env, "error"), - RdKafkaError(code, rd_kafka_err2str(partition->err))); + RdKafkaError(code, rd_kafka_err2str(partition->err))); } // Set leaderEpoch (if available) int32_t leader_epoch = - rd_kafka_topic_partition_get_leader_epoch(partition); + rd_kafka_topic_partition_get_leader_epoch(partition); if (leader_epoch >= 0) { (partition_object).Set(Napi::String::New(env, "leaderEpoch"), - Napi::Number::New(env, leader_epoch)); + Napi::Number::New(env, leader_epoch)); } (partition_object).Set(Napi::String::New(env, "timestamp"), - Napi::Number::New(env, timestamp)); + Napi::Number::New(env, timestamp)); (resultArray).Set(partitionIndex++, partition_object); } diff --git a/src/connection.h b/src/connection.h index 85bc4a12..12c03194 100644 --- a/src/connection.h +++ b/src/connection.h @@ -57,7 +57,7 @@ template class Connection : public Napi::ObjectWrap { bool IsClosing() const { return m_client != NULL && m_is_closing; } - + // Baton Baton CreateTopic(std::string topic_name, RdKafka::Conf* conf = NULL) { std::string errstr; @@ -285,7 +285,7 @@ template class Connection : public Napi::ObjectWrap { protected: Connection(const Napi::CallbackInfo &info): m_event_cb() { - Napi::Env env = info.Env(); + Napi::Env env = info.Env(); if (!info.IsConstructCall()) { Napi::Error::New(env, "non-constructor invocation not supported").ThrowAsJavaScriptException(); } @@ -299,7 +299,7 @@ template class Connection : public Napi::ObjectWrap { void Config(Conf *gconfig, Conf *tconfig) { this->m_gconfig = gconfig; this->m_tconfig = tconfig; - + std::string errstr; m_client = NULL; @@ -383,7 +383,7 @@ template class Connection : public Napi::ObjectWrap { RdKafka::Error* error = m_client->sasl_background_callbacks_enable(); return 
rdkafkaErrorToBaton(error); } - + // Baton setupSaslOAuthBearerConfig(); // Baton setupSaslOAuthBearerBackgroundQueue(); @@ -429,7 +429,7 @@ template class Connection : public Napi::ObjectWrap { *callback = Napi::Persistent(cb); Napi::AsyncWorker *worker = new Workers::ConnectionMetadata( - callback, obj, topic, timeout_ms, allTopics); + callback, obj, topic, timeout_ms, allTopics); worker->Queue(); return env.Null(); @@ -456,7 +456,7 @@ template class Connection : public Napi::ObjectWrap { Connection* handle = this; Napi::AsyncWorker *worker = new Workers::Handle::OffsetsForTimes( - callback, handle, toppars, timeout_ms); + callback, handle, toppars, timeout_ms); worker->Queue(); return env.Null(); @@ -506,7 +506,7 @@ template class Connection : public Napi::ObjectWrap { callback->Reset(cb); Napi::AsyncWorker *worker = new Workers::ConnectionQueryWatermarkOffsets( - callback, obj, topic_name, partition, timeout_ms); + callback, obj, topic_name, partition, timeout_ms); worker->Queue(); return env.Null(); @@ -557,7 +557,7 @@ template class Connection : public Napi::ObjectWrap { Napi::Error::New(env, "Need to specify a callbacks object").ThrowAsJavaScriptException(); return env.Null(); } - + Connection* obj = this; const bool add = info[0].As().Value(); @@ -579,11 +579,11 @@ template class Connection : public Napi::ObjectWrap { std::string configs_utf8_key = configs_key.As(); configs_string_key = std::string(configs_utf8_key); if (configs_string_key.compare("global") == 0) { - config_type = 1; + config_type = 1; } else if (configs_string_key.compare("topic") == 0) { - config_type = 2; + config_type = 2; } else if (configs_string_key.compare("event") == 0) { - config_type = 3; + config_type = 3; } else { continue; } @@ -613,23 +613,23 @@ template class Connection : public Napi::ObjectWrap { if (value.IsFunction()) { Napi::Function cb = value.As(); switch (config_type) { - case 1: - obj->m_gconfig->ConfigureCallback(string_key, cb, add, errstr); - if (!errstr.empty()) { - Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); - return env.Null(); - } - break; - case 2: - obj->m_tconfig->ConfigureCallback(string_key, cb, add, errstr); - if (!errstr.empty()) { - Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); - return env.Null(); - } - break; - case 3: - obj->ConfigureCallback(string_key, cb, add); - break; + case 1: + obj->m_gconfig->ConfigureCallback(string_key, cb, add, errstr); + if (!errstr.empty()) { + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return env.Null(); + } + break; + case 2: + obj->m_tconfig->ConfigureCallback(string_key, cb, add, errstr); + if (!errstr.empty()) { + Napi::Error::New(env, errstr.c_str()).ThrowAsJavaScriptException(); + return env.Null(); + } + break; + case 3: + obj->ConfigureCallback(string_key, cb, add); + break; } } } @@ -668,7 +668,7 @@ template class Connection : public Napi::ObjectWrap { int64_t lifetime_ms = info[1].As().Int64Value(); // Get string pointer for the principal_name - std::string principal_nameUtf8 = + std::string principal_nameUtf8 = info[2].As().Utf8Value(); std::string principal_name(principal_nameUtf8); @@ -716,7 +716,7 @@ template class Connection : public Napi::ObjectWrap { Napi::Env env = info.Env(); return Napi::String::From(env, this->Name()); - } + } }; } // namespace NodeKafka diff --git a/src/kafka-consumer.cc b/src/kafka-consumer.cc index b8645ca5..eddaa0d1 100644 --- a/src/kafka-consumer.cc +++ b/src/kafka-consumer.cc @@ -48,7 +48,7 @@ KafkaConsumer::KafkaConsumer(const 
Napi::CallbackInfo& info): Connection<KafkaConsumer>(info) {
m_queue_not_empty_cb.dispatcher.AddCallback(cb); @@ -213,7 +213,7 @@ Baton KafkaConsumer::GetWatermarkOffsets( if (IsConnected()) { // Always send true - we err = m_client->get_watermark_offsets(topic_name, partition, - low_offset, high_offset); + low_offset, high_offset); } else { err = RdKafka::ERR__STATE; } @@ -305,13 +305,13 @@ Baton KafkaConsumer::IncrementalUnassign( // For now, use two for loops. Make more efficient if needed later. for (unsigned int i = 0; i < partitions.size(); i++) { for (unsigned int j = 0; j < m_partitions.size(); j++) { - if (partitions[i]->partition() == m_partitions[j]->partition() && - partitions[i]->topic() == m_partitions[j]->topic()) { - delete_partitions.push_back(m_partitions[j]); - m_partitions.erase(m_partitions.begin() + j); - m_partition_cnt--; - break; - } + if (partitions[i]->partition() == m_partitions[j]->partition() && + partitions[i]->topic() == m_partitions[j]->topic()) { + delete_partitions.push_back(m_partitions[j]); + m_partitions.erase(m_partitions.begin() + j); + m_partition_cnt--; + break; + } } } } @@ -507,12 +507,12 @@ Baton KafkaConsumer::Consume(int timeout_ms) { RdKafka::ErrorCode response_code = message->err(); // we want to handle these errors at the call site if (response_code != RdKafka::ERR_NO_ERROR && - response_code != RdKafka::ERR__PARTITION_EOF && - response_code != RdKafka::ERR__TIMED_OUT && - response_code != RdKafka::ERR__TIMED_OUT_QUEUE + response_code != RdKafka::ERR__PARTITION_EOF && + response_code != RdKafka::ERR__TIMED_OUT && + response_code != RdKafka::ERR__TIMED_OUT_QUEUE ) { - delete message; - return Baton(response_code); + delete message; + return Baton(response_code); } return Baton(message); @@ -604,7 +604,7 @@ void KafkaConsumer::Init(Napi::Env env, Napi::Object exports) { InstanceMethod("pause", &KafkaConsumer::NodePause), InstanceMethod("resume", &KafkaConsumer::NodeResume), - + /* * @brief Methods to do with partition assignment / rebalancing */ @@ -626,7 +626,7 @@ void KafkaConsumer::Init(Napi::Env env, Napi::Object exports) { InstanceMethod("offsetsStoreSingle", &KafkaConsumer::NodeOffsetsStoreSingle), }); - constructor.Reset(KafkaConsumer); + constructor.Reset(KafkaConsumer); exports.Set(Napi::String::New(env, "KafkaConsumer"), KafkaConsumer); } @@ -697,7 +697,7 @@ Napi::Value KafkaConsumer::NodeSubscription(const Napi::CallbackInfo &info) { } Napi::Value KafkaConsumer::NodePosition(const Napi::CallbackInfo &info) { - Napi::Env env = info.Env(); + Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsArray()) { @@ -717,7 +717,7 @@ Napi::Value KafkaConsumer::NodePosition(const Napi::CallbackInfo &info) { return Napi::Number::New(env, error_code); } - return + return Conversion::TopicPartition::ToV8Array(toppars); // Delete the underlying topic partitions @@ -737,7 +737,7 @@ Napi::Value KafkaConsumer::NodeAssignments(const Napi::CallbackInfo& info) { return Napi::Number::New(env, error_code); } - return + return Conversion::TopicPartition::ToV8Array(this->m_partitions); } @@ -786,17 +786,17 @@ Napi::Value KafkaConsumer::NodeAssign(const Napi::CallbackInfo& info) { RdKafka::TopicPartition* part; if (partition < 0) { - part = Connection<KafkaConsumer>::GetPartition(topic); + part = Connection<KafkaConsumer>::GetPartition(topic); } else { - part = Connection<KafkaConsumer>::GetPartition(topic, partition); + part = Connection<KafkaConsumer>::GetPartition(topic, partition); } // Set the default value to offset invalid. If provided, we will not set // the offset. 
int64_t offset = GetParameter<int64_t>( - partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); + partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); if (offset != RdKafka::Topic::OFFSET_INVALID) { - part->set_offset(offset); + part->set_offset(offset); } topic_partitions.push_back(part); @@ -863,17 +863,17 @@ Napi::Value KafkaConsumer::NodeIncrementalAssign(const Napi::CallbackInfo &info) RdKafka::TopicPartition* part; if (partition < 0) { - part = Connection<KafkaConsumer>::GetPartition(topic); + part = Connection<KafkaConsumer>::GetPartition(topic); } else { - part = Connection<KafkaConsumer>::GetPartition(topic, partition); + part = Connection<KafkaConsumer>::GetPartition(topic, partition); } // Set the default value to offset invalid. If provided, we will not set // the offset. int64_t offset = GetParameter<int64_t>( - partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); + partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); if (offset != RdKafka::Topic::OFFSET_INVALID) { - part->set_offset(offset); + part->set_offset(offset); } topic_partitions.push_back(part); @@ -920,17 +920,17 @@ Napi::Value KafkaConsumer::NodeIncrementalUnassign(const Napi::CallbackInfo &inf RdKafka::TopicPartition* part; if (partition < 0) { - part = Connection<KafkaConsumer>::GetPartition(topic); + part = Connection<KafkaConsumer>::GetPartition(topic); } else { - part = Connection<KafkaConsumer>::GetPartition(topic, partition); + part = Connection<KafkaConsumer>::GetPartition(topic, partition); } // Set the default value to offset invalid. If provided, we will not set // the offset. int64_t offset = GetParameter<int64_t>( - partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); + partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); if (offset != RdKafka::Topic::OFFSET_INVALID) { - part->set_offset(offset); + part->set_offset(offset); } topic_partitions.push_back(part); @@ -951,7 +951,7 @@ Napi::Value KafkaConsumer::NodeIncrementalUnassign(const Napi::CallbackInfo &inf Napi::Value KafkaConsumer::NodeUnsubscribe(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); Baton b = this->Unsubscribe(); @@ -960,7 +960,7 @@ Napi::Value KafkaConsumer::NodeUnsubscribe(const Napi::CallbackInfo &info) { } Napi::Value KafkaConsumer::NodeCommit(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); int error_code; @@ -1003,7 +1003,7 @@ Napi::Value KafkaConsumer::NodeCommit(const Napi::CallbackInfo &info) { } Napi::Value KafkaConsumer::NodeCommitSync(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); int error_code; @@ -1045,7 +1045,7 @@ Napi::Value KafkaConsumer::NodeCommitSync(const Napi::CallbackInfo &info) { } Napi::Value KafkaConsumer::NodeCommitCb(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); int error_code; std::optional<std::vector<RdKafka::TopicPartition *>> toppars = std::nullopt; @@ -1087,7 +1087,7 @@ Napi::Value KafkaConsumer::NodeCommitCb(const Napi::CallbackInfo &info) { } Napi::Value KafkaConsumer::NodeSubscribe(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsArray()) { @@ -1107,7 +1107,7 @@ Napi::Value KafkaConsumer::NodeSubscribe(const Napi::CallbackInfo &info) { } Napi::Value KafkaConsumer::NodeSeek(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); 
Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, timeout, @@ -1156,7 +1156,7 @@ Napi::Value KafkaConsumer::NodeSeek(const Napi::CallbackInfo &info) { } Napi::FunctionReference *callback = new Napi::FunctionReference(); - + callback->Reset(info[2].As()); Napi::AsyncWorker *worker = @@ -1195,15 +1195,15 @@ Napi::Value KafkaConsumer::NodeOffsetsStore(const Napi::CallbackInfo &info) { Napi::Value KafkaConsumer::NodeOffsetsStoreSingle(const Napi::CallbackInfo &info) { - Napi::Env env = info.Env(); + Napi::Env env = info.Env(); Napi::HandleScope scope(env); // If number of parameters is less than 3 (need topic partition, partition, // offset, and leader epoch), we can't call this. if (info.Length() < 4) { Napi::Error::New(env, - "Must provide topic, partition, offset and leaderEpoch") - .ThrowAsJavaScriptException(); + "Must provide topic, partition, offset and leaderEpoch") + .ThrowAsJavaScriptException(); return env.Null(); } @@ -1404,7 +1404,7 @@ Napi::Value KafkaConsumer::NodeConsume(const Napi::CallbackInfo &info) { callback->Reset(cb); Napi::AsyncWorker *worker = new Workers::KafkaConsumerConsumeNum( - callback, this, numMessages, timeout_ms, isTimeoutOnlyForFirstMessage); + callback, this, numMessages, timeout_ms, isTimeoutOnlyForFirstMessage); worker->Queue(); } else { if (!info[1].IsFunction()) { @@ -1446,7 +1446,7 @@ Napi::Value KafkaConsumer::NodeConnect(const Napi::CallbackInfo &info) { } Napi::Value KafkaConsumer::NodeDisconnect(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsFunction()) { @@ -1483,7 +1483,7 @@ Napi::Value KafkaConsumer::NodeGetWatermarkOffsets(const Napi::CallbackInfo &inf Napi::HandleScope scope(env); if (!info[0].IsString()) { - Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException(); + Napi::Error::New(env, "1st parameter must be a topic string").ThrowAsJavaScriptException(); return env.Null(); } diff --git a/src/kafka-consumer.h b/src/kafka-consumer.h index 7608690c..366091ef 100644 --- a/src/kafka-consumer.h +++ b/src/kafka-consumer.h @@ -91,7 +91,7 @@ class KafkaConsumer : public Connection { void DeactivateDispatchers(); void ConfigureCallback(const std::string& string_key, - const Napi::Function& cb, bool add) override; + const Napi::Function& cb, bool add) override; protected: static Napi::FunctionReference constructor; diff --git a/src/producer.cc b/src/producer.cc index 4afd5e02..fc48f24b 100644 --- a/src/producer.cc +++ b/src/producer.cc @@ -101,7 +101,7 @@ void Producer::Init(const Napi::Env& env, Napi::Object exports) { */ InstanceMethod("configureCallbacks", &Producer::NodeConfigureCallbacks), - + /* * @brief Methods to do with establishing state */ @@ -133,7 +133,7 @@ void Producer::Init(const Napi::Env& env, Napi::Object exports) { InstanceMethod("beginTransaction", &Producer::NodeBeginTransaction), InstanceMethod("commitTransaction", &Producer::NodeCommitTransaction), InstanceMethod("abortTransaction", &Producer::NodeAbortTransaction), - InstanceMethod("sendOffsetsToTransaction", &Producer::NodeSendOffsetsToTransaction), // NOLINT + InstanceMethod("sendOffsetsToTransaction", &Producer::NodeSendOffsetsToTransaction), // NOLINT }); @@ -216,8 +216,8 @@ Baton Producer::Produce(void* message, size_t size, RdKafka::Topic* topic, if (IsConnected()) { RdKafka::Producer* producer = dynamic_cast(m_client); response_code = producer->produce(topic, 
partition, - RdKafka::Producer::RK_MSG_COPY, - message, size, key, key_len, opaque); + RdKafka::Producer::RK_MSG_COPY, + message, size, key, key_len, opaque); } else { response_code = RdKafka::ERR__STATE; } @@ -286,10 +286,10 @@ Baton Producer::Produce(void* message, size_t size, std::string topic, RdKafka::Producer* producer = dynamic_cast<RdKafka::Producer *>(m_client); // This one is a bit different response_code = producer->produce(topic, partition, - RdKafka::Producer::RK_MSG_COPY, - message, size, - key, key_len, - timestamp, headers, opaque); + RdKafka::Producer::RK_MSG_COPY, + message, size, + key, key_len, + timestamp, headers, opaque); } else { response_code = RdKafka::ERR__STATE; } @@ -503,9 +503,9 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo &info) { if (message_buffer_data == NULL) { // empty string message buffer should not end up as null message Napi::Object message_buffer_object_emptystring = - Napi::Buffer<char>::New(env, new char[0], 0); + Napi::Buffer<char>::New(env, new char[0], 0); message_buffer_length = - message_buffer_object_emptystring.As<Napi::Buffer<char>>().Length(); + message_buffer_object_emptystring.As<Napi::Buffer<char>>().Length(); message_buffer_data = message_buffer_object_emptystring.As<Napi::Buffer<char>>().Data(); // NOLINT } } @@ -534,7 +534,7 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo &info) { if (key_buffer_data == NULL) { // empty string key buffer should not end up as null key Napi::Object key_buffer_object_emptystring = - Napi::Buffer<char>::New(env, new char[0], 0); + Napi::Buffer<char>::New(env, new char[0], 0); key_buffer_length = key_buffer_object_emptystring.As<Napi::Buffer<char>>().Length(); key_buffer_data = key_buffer_object_emptystring.As<Napi::Buffer<char>>().Data(); } @@ -577,45 +577,45 @@ Napi::Value Producer::NodeProduce(const Napi::CallbackInfo &info) { if (v8Headers.Length() >= 1) { for (unsigned int i = 0; i < v8Headers.Length(); i++) { - Napi::Object header = (v8Headers).Get(i) - .ToObject(); - if (header.IsEmpty()) { - continue; - } - - Napi::Array props = header.GetPropertyNames(); - - // TODO: Other properties in the list of properties should not be - // ignored, but they are. This is a bug, need to handle it either in JS - // or here. - Napi::MaybeOrValue<Napi::Value> jsKey = props.Get(Napi::Value::From(env, 0)); - - // The key must be a string. - if (jsKey.IsEmpty()) { - Napi::Error::New(env, "Header key must be a string").ThrowAsJavaScriptException(); - - } - std::string uKey = jsKey.ToString().Utf8Value(); - std::string key(uKey); - - // Valid types for the header are string or buffer. - // Other types will throw an error. - Napi::Value v8Value = - (header).Get(jsKey); - - if (v8Value.IsBuffer()) { - const char* value = v8Value.As<Napi::Buffer<char>>().Data(); - const size_t value_len = v8Value.As<Napi::Buffer<char>>().Length(); - headers.push_back(RdKafka::Headers::Header(key, value, value_len)); - } else if (v8Value.IsString()) { - std::string uValue = v8Value.As<Napi::String>().Utf8Value(); - std::string value(uValue); - headers.push_back( - RdKafka::Headers::Header(key, value.c_str(), value.size())); - } else { - Napi::Error::New(env, "Header value must be a string or buffer").ThrowAsJavaScriptException(); - - } + Napi::Object header = (v8Headers).Get(i) + .ToObject(); + if (header.IsEmpty()) { + continue; + } + + Napi::Array props = header.GetPropertyNames(); + + // TODO: Other properties in the list of properties should not be + // ignored, but they are. This is a bug, need to handle it either in JS + // or here. + Napi::MaybeOrValue<Napi::Value> jsKey = props.Get(Napi::Value::From(env, 0)); + + // The key must be a string. + if (jsKey.IsEmpty()) { + Napi::Error::New(env, "Header key must be a string").ThrowAsJavaScriptException(); + + } + std::string uKey = jsKey.ToString().Utf8Value(); + std::string key(uKey); + + // Valid types for the header are string or buffer. + // Other types will throw an error. + Napi::Value v8Value = + (header).Get(jsKey); + + if (v8Value.IsBuffer()) { + const char* value = v8Value.As<Napi::Buffer<char>>().Data(); + const size_t value_len = v8Value.As<Napi::Buffer<char>>().Length(); + headers.push_back(RdKafka::Headers::Header(key, value, value_len)); + } else if (v8Value.IsString()) { + std::string uValue = v8Value.As<Napi::String>().Utf8Value(); + std::string value(uValue); + headers.push_back( + RdKafka::Headers::Header(key, value.c_str(), value.size())); + } else { + Napi::Error::New(env, "Header value must be a string or buffer").ThrowAsJavaScriptException(); + + } } } } @@ -835,7 +835,7 @@ Napi::Value Producer::NodeInitTransactions(const Napi::CallbackInfo &info) { } Napi::Value Producer::NodeBeginTransaction(const Napi::CallbackInfo &info) { - const Napi::Env env = info.Env(); + const Napi::Env env = info.Env(); Napi::HandleScope scope(env); if (info.Length() < 1 || !info[0].IsFunction()) { @@ -926,7 +926,7 @@ Producer::NodeSendOffsetsToTransaction(const Napi::CallbackInfo &info) { NodeKafka::KafkaConsumer *consumer = ObjectWrap<KafkaConsumer>::Unwrap(info[1].As<Napi::Object>()); - + int timeout_ms = info[2].As<Napi::Number>().Int32Value(); Napi::Function cb = info[3].As<Napi::Function>(); Napi::FunctionReference *callback = new Napi::FunctionReference(); diff --git a/src/topic.h b/src/topic.h index 294a9a0d..d2de59a9 100644 --- a/src/topic.h +++ b/src/topic.h @@ -29,7 +29,7 @@ class Topic : public Napi::ObjectWrap<Topic> { template <typename T> Baton toRDKafkaTopic(Connection<T> *handle); protected: - static Napi::FunctionReference constructor; + static Napi::FunctionReference constructor; Napi::Value NodeGetMetadata(const Napi::CallbackInfo& info); diff --git a/src/workers.cc b/src/workers.cc index cd03c529..b20fe7fc 100644 --- a/src/workers.cc +++ b/src/workers.cc @@ -37,9 +37,9 @@ namespace Handle { */ OffsetsForTimes::OffsetsForTimes(Napi::FunctionReference *callback, - Connection* handle, - std::vector<RdKafka::TopicPartition *> & t, - const int & timeout_ms) : + Connection* handle, + std::vector<RdKafka::TopicPartition *> & t, + const int & timeout_ms) : ErrorAwareWorker(callback), m_handle(handle), m_topic_partitions(t), @@ -356,7 +356,7 @@ void ProducerInitTransactions::OnError() { */ ProducerBeginTransaction::ProducerBeginTransaction(Napi::FunctionReference* callback, - Producer* producer) + Producer* producer) : ErrorAwareWorker(callback), producer(producer) {} ProducerBeginTransaction::~ProducerBeginTransaction() {} @@ -508,7 +508,7 @@ ProducerSendOffsetsToTransaction::~ProducerSendOffsetsToTransaction() {} void ProducerSendOffsetsToTransaction::Execute() { Baton b = producer->SendOffsetsToTransaction(m_topic_partitions, consumer, - m_timeout_ms); + m_timeout_ms); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); @@ -655,16 +655,16 @@ void KafkaConsumerDisconnect::OnError() { */ KafkaConsumerConsumeLoop::KafkaConsumerConsumeLoop(Napi::FunctionReference *callback, - KafkaConsumer* consumer, - const int & timeout_ms, - const int & timeout_sleep_delay_ms) : + KafkaConsumer* consumer, - const int & timeout_ms, + const int & timeout_ms, + const int & timeout_sleep_delay_ms) : MessageWorker(callback), consumer(consumer), m_looping(true), m_timeout_ms(timeout_ms), m_timeout_sleep_delay_ms(timeout_sleep_delay_ms) { uv_thread_create(&thread_event_loop, KafkaConsumerConsumeLoop::ConsumeLoop, - reinterpret_cast<void *>(this)); + reinterpret_cast<void *>(this)); } 
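All of these worker hunks migrate to the same node-addon-api shape: a class deriving from Napi::AsyncWorker (via the project's ErrorAwareWorker/MessageWorker adapters), whose Execute() runs off the JS thread and whose OnOK()/OnError() fire the stored callback back on it. A minimal sketch of that pattern follows; ExampleConsumeWorker and its members are illustrative names, not classes from this patch:

#include <napi.h>

// Illustrative only: the general Napi::AsyncWorker pattern the NodeKafka
// workers follow after the migration. Not code from this patch.
class ExampleConsumeWorker : public Napi::AsyncWorker {
 public:
  ExampleConsumeWorker(Napi::Function& callback, int timeout_ms)
      : Napi::AsyncWorker(callback), m_timeout_ms(timeout_ms) {}

  // Runs on a libuv worker thread; JS values must not be touched here.
  void Execute() override {
    if (m_timeout_ms < 0) {
      SetError("timeout_ms must be non-negative");
    }
  }

  // Runs back on the JS thread; invokes the callback node-style (err, result).
  void OnOK() override {
    Napi::HandleScope scope(Env());
    Callback().Call({Env().Null(), Napi::Number::New(Env(), m_timeout_ms)});
  }

  // OnError() is inherited: it calls the callback with the error set above.

 private:
  int m_timeout_ms;
};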
KafkaConsumerConsumeLoop::~KafkaConsumerConsumeLoop() {} @@ -691,35 +691,35 @@ void KafkaConsumerConsumeLoop::ConsumeLoop(void* arg) { if (ec == RdKafka::ERR_NO_ERROR) { RdKafka::Message *message = b.data(); switch (message->err()) { - case RdKafka::ERR__PARTITION_EOF: - bus.Send(message); - break; - - case RdKafka::ERR__TIMED_OUT: - case RdKafka::ERR__TIMED_OUT_QUEUE: - delete message; - if (consumerLoop->m_timeout_sleep_delay_ms > 0) { - // If it is timed out this could just mean there were no - // new messages fetched quickly enough. This isn't really - // an error that should kill us. - #ifndef _WIN32 - usleep(consumerLoop->m_timeout_sleep_delay_ms*1000); - #else - _sleep(consumerLoop->m_timeout_sleep_delay_ms); - #endif - } - break; - case RdKafka::ERR_NO_ERROR: - bus.Send(message); - break; - default: - // Unknown error. We need to break out of this - consumerLoop->SetErrorBaton(b); - consumerLoop->m_looping = false; - break; - } + case RdKafka::ERR__PARTITION_EOF: + bus.Send(message); + break; + + case RdKafka::ERR__TIMED_OUT: + case RdKafka::ERR__TIMED_OUT_QUEUE: + delete message; + if (consumerLoop->m_timeout_sleep_delay_ms > 0) { + // If it is timed out this could just mean there were no + // new messages fetched quickly enough. This isn't really + // an error that should kill us. + #ifndef _WIN32 + usleep(consumerLoop->m_timeout_sleep_delay_ms*1000); + #else + _sleep(consumerLoop->m_timeout_sleep_delay_ms); + #endif + } + break; + case RdKafka::ERR_NO_ERROR: + bus.Send(message); + break; + default: + // Unknown error. We need to break out of this + consumerLoop->SetErrorBaton(b); + consumerLoop->m_looping = false; + break; + } } else if (ec == RdKafka::ERR_UNKNOWN_TOPIC_OR_PART || - ec == RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED) { + ec == RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED) { bus.SendWarning(ec); } else { // Unknown error. 
We need to break out of this @@ -730,7 +730,7 @@ void KafkaConsumerConsumeLoop::ConsumeLoop(void* arg) { } void KafkaConsumerConsumeLoop::HandleMessageCallback(RdKafka::Message* msg, - RdKafka::ErrorCode ec) { + RdKafka::ErrorCode ec) { Napi::HandleScope scope(env); const unsigned int argc = 4; @@ -745,23 +745,23 @@ void KafkaConsumerConsumeLoop::HandleMessageCallback(RdKafka::Message* msg, argv[3] = env.Null(); switch (msg->err()) { case RdKafka::ERR__PARTITION_EOF: { - argv[1] = env.Null(); - Napi::Object eofEvent = Napi::Object::New(env); - - (eofEvent).Set(Napi::String::New(env, "topic"), - Napi::String::New(env, msg->topic_name())); - (eofEvent).Set(Napi::String::New(env, "offset"), - Napi::Number::New(env, msg->offset())); - (eofEvent).Set(Napi::String::New(env, "partition"), - Napi::Number::New(env, msg->partition())); - - argv[2] = eofEvent; - break; + argv[1] = env.Null(); + Napi::Object eofEvent = Napi::Object::New(env); + + (eofEvent).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, msg->topic_name())); + (eofEvent).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, msg->offset())); + (eofEvent).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, msg->partition())); + + argv[2] = eofEvent; + break; } default: - argv[1] = Conversion::Message::ToV8Object(msg); - argv[2] = env.Null(); - break; + argv[1] = Conversion::Message::ToV8Object(msg); + argv[2] = env.Null(); + break; } // We can delete msg now @@ -796,10 +796,10 @@ void KafkaConsumerConsumeLoop::OnError() { */ KafkaConsumerConsumeNum::KafkaConsumerConsumeNum(Napi::FunctionReference *callback, - KafkaConsumer* consumer, - const uint32_t & num_messages, - const int & timeout_ms, - bool timeout_only_for_first_message) : + KafkaConsumer* consumer, + const uint32_t & num_messages, + const int & timeout_ms, + bool timeout_only_for_first_message) : ErrorAwareWorker(callback), m_consumer(consumer), m_num_messages(num_messages), @@ -821,49 +821,49 @@ void KafkaConsumerConsumeNum::Execute() { RdKafka::Message *message = b.data(); RdKafka::ErrorCode errorCode = message->err(); switch (errorCode) { - case RdKafka::ERR__PARTITION_EOF: - // If partition EOF and have consumed messages, retry with timeout 1 - // This allows getting ready messages, while not waiting for new ones - if (m_messages.size() > eof_event_count) { - timeout_ms = 1; - } - - // We will only go into this code path when `enable.partition.eof` - // is set to true. In this case, consumer is also interested in EOF - // messages, so we return an EOF message - m_messages.push_back(message); - eof_event_count += 1; - break; - case RdKafka::ERR__TIMED_OUT: - case RdKafka::ERR__TIMED_OUT_QUEUE: - // Break of the loop if we timed out - delete message; - looping = false; - break; - case RdKafka::ERR_NO_ERROR: - m_messages.push_back(b.data()); - - // This allows getting ready messages, while not waiting for new ones. - // This is useful when we want to get the as many messages as possible - // within the timeout but not wait if we already have one or more - // messages. 
- if (m_timeout_only_for_first_message) { - timeout_ms = 1; - } - - break; - default: - // Set the error for any other errors and break - delete message; - if (m_messages.size() == eof_event_count) { - SetErrorBaton(Baton(errorCode)); - } - looping = false; - break; + case RdKafka::ERR__PARTITION_EOF: + // If partition EOF and have consumed messages, retry with timeout 1 + // This allows getting ready messages, while not waiting for new ones + if (m_messages.size() > eof_event_count) { + timeout_ms = 1; + } + + // We will only go into this code path when `enable.partition.eof` + // is set to true. In this case, consumer is also interested in EOF + // messages, so we return an EOF message + m_messages.push_back(message); + eof_event_count += 1; + break; + case RdKafka::ERR__TIMED_OUT: + case RdKafka::ERR__TIMED_OUT_QUEUE: + // Break of the loop if we timed out + delete message; + looping = false; + break; + case RdKafka::ERR_NO_ERROR: + m_messages.push_back(b.data()); + + // This allows getting ready messages, while not waiting for new ones. + // This is useful when we want to get the as many messages as possible + // within the timeout but not wait if we already have one or more + // messages. + if (m_timeout_only_for_first_message) { + timeout_ms = 1; + } + + break; + default: + // Set the error for any other errors and break + delete message; + if (m_messages.size() == eof_event_count) { + SetErrorBaton(Baton(errorCode)); + } + looping = false; + break; } } else { if (m_messages.size() == eof_event_count) { - SetErrorBaton(b); + SetErrorBaton(b); } looping = false; } @@ -883,35 +883,35 @@ void KafkaConsumerConsumeNum::OnOK() { int returnArrayIndex = -1; int eofEventsArrayIndex = -1; for (std::vector::iterator it = m_messages.begin(); - it != m_messages.end(); ++it) { + it != m_messages.end(); ++it) { RdKafka::Message* message = *it; switch (message->err()) { - case RdKafka::ERR_NO_ERROR: - ++returnArrayIndex; - (returnArray).Set(returnArrayIndex, - Conversion::Message::ToV8Object(message)); - break; - case RdKafka::ERR__PARTITION_EOF: - ++eofEventsArrayIndex; - - // create EOF event - Napi::Object eofEvent = Napi::Object::New(env); - - (eofEvent).Set(Napi::String::New(env, "topic"), - Napi::String::New(env, message->topic_name())); - (eofEvent).Set(Napi::String::New(env, "offset"), - Napi::Number::New(env, message->offset())); - (eofEvent).Set(Napi::String::New(env, "partition"), - Napi::Number::New(env, message->partition())); - - // also store index at which position in the message array this event - // was emitted this way, we can later emit it at the right point in - // time - (eofEvent).Set(Napi::String::New(env, "messageIndex"), - Napi::Number::New(env, returnArrayIndex)); - - (eofEventsArray).Set(eofEventsArrayIndex, eofEvent); + case RdKafka::ERR_NO_ERROR: + ++returnArrayIndex; + (returnArray).Set(returnArrayIndex, + Conversion::Message::ToV8Object(message)); + break; + case RdKafka::ERR__PARTITION_EOF: + ++eofEventsArrayIndex; + + // create EOF event + Napi::Object eofEvent = Napi::Object::New(env); + + (eofEvent).Set(Napi::String::New(env, "topic"), + Napi::String::New(env, message->topic_name())); + (eofEvent).Set(Napi::String::New(env, "offset"), + Napi::Number::New(env, message->offset())); + (eofEvent).Set(Napi::String::New(env, "partition"), + Napi::Number::New(env, message->partition())); + + // also store index at which position in the message array this event + // was emitted this way, we can later emit it at the right point in + // time + 
(eofEvent).Set(Napi::String::New(env, "messageIndex"), + Napi::Number::New(env, returnArrayIndex)); + + (eofEventsArray).Set(eofEventsArrayIndex, eofEvent); } delete message; @@ -929,7 +929,7 @@ void KafkaConsumerConsumeNum::OnError() { if (m_messages.size() > 0) { for (std::vector::iterator it = m_messages.begin(); - it != m_messages.end(); ++it) { + it != m_messages.end(); ++it) { RdKafka::Message* message = *it; delete message; } @@ -953,8 +953,8 @@ void KafkaConsumerConsumeNum::OnError() { */ KafkaConsumerConsume::KafkaConsumerConsume(Napi::FunctionReference *callback, - KafkaConsumer* consumer, - const int & timeout_ms) : + KafkaConsumer* consumer, + const int & timeout_ms) : ErrorAwareWorker(callback), consumer(consumer), m_timeout_ms(timeout_ms) {} @@ -1010,9 +1010,9 @@ void KafkaConsumerConsume::OnError() { */ KafkaConsumerCommitted::KafkaConsumerCommitted(Napi::FunctionReference *callback, - KafkaConsumer* consumer, - std::vector & t, - const int & timeout_ms) : + KafkaConsumer* consumer, + std::vector & t, + const int & timeout_ms) : ErrorAwareWorker(callback), m_consumer(consumer), m_topic_partitions(t), @@ -1053,7 +1053,7 @@ void KafkaConsumerCommitted::OnError() { /** * @brief KafkaConsumer commit offsets with a callback function. - * + * * The first callback argument is the commit error, or null on success. * * @see RdKafka::KafkaConsumer::commitSync @@ -1116,9 +1116,9 @@ void KafkaConsumerCommitCb::OnError() { */ KafkaConsumerSeek::KafkaConsumerSeek(Napi::FunctionReference *callback, - KafkaConsumer* consumer, - const RdKafka::TopicPartition * toppar, - const int & timeout_ms) : + KafkaConsumer* consumer, + const RdKafka::TopicPartition * toppar, + const int & timeout_ms) : ErrorAwareWorker(callback), m_consumer(consumer), m_toppar(toppar), @@ -1172,9 +1172,9 @@ void KafkaConsumerSeek::OnError() { * */ AdminClientCreateTopic::AdminClientCreateTopic(Napi::FunctionReference *callback, - AdminClient* client, - rd_kafka_NewTopic_t* topic, - const int & timeout_ms) : + AdminClient* client, + rd_kafka_NewTopic_t* topic, + const int & timeout_ms) : ErrorAwareWorker(callback), m_client(client), m_topic(topic), @@ -1219,9 +1219,9 @@ void AdminClientCreateTopic::OnError() { * */ AdminClientDeleteTopic::AdminClientDeleteTopic(Napi::FunctionReference *callback, - AdminClient* client, - rd_kafka_DeleteTopic_t* topic, - const int & timeout_ms) : + AdminClient* client, + rd_kafka_DeleteTopic_t* topic, + const int & timeout_ms) : ErrorAwareWorker(callback), m_client(client), m_topic(topic), @@ -1266,10 +1266,10 @@ void AdminClientDeleteTopic::OnError() { * */ AdminClientCreatePartitions::AdminClientCreatePartitions( - Napi::FunctionReference *callback, - AdminClient* client, - rd_kafka_NewPartitions_t* partitions, - const int & timeout_ms) : + Napi::FunctionReference *callback, + AdminClient* client, + rd_kafka_NewPartitions_t* partitions, + const int & timeout_ms) : ErrorAwareWorker(callback), m_client(client), m_partitions(partitions), @@ -1331,7 +1331,7 @@ AdminClientListGroups::~AdminClientListGroups() { void AdminClientListGroups::Execute() { Baton b = m_client->ListGroups(m_is_match_states_set, m_match_states, - m_timeout_ms, &m_event_response); + m_timeout_ms, &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } @@ -1386,7 +1386,7 @@ AdminClientDescribeGroups::~AdminClientDescribeGroups() { void AdminClientDescribeGroups::Execute() { Baton b = m_client->DescribeGroups(m_groups, m_include_authorized_operations, - m_timeout_ms, &m_event_response); + 
m_timeout_ms, &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } @@ -1444,7 +1444,7 @@ AdminClientDeleteGroups::~AdminClientDeleteGroups() { void AdminClientDeleteGroups::Execute() { Baton b = m_client->DeleteGroups(m_group_list, m_group_cnt, m_timeout_ms, - &m_event_response); + &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } @@ -1505,8 +1505,8 @@ AdminClientListConsumerGroupOffsets::~AdminClientListConsumerGroupOffsets() { void AdminClientListConsumerGroupOffsets::Execute() { Baton b = m_client->ListConsumerGroupOffsets(m_req, m_req_cnt, - m_require_stable_offsets, - m_timeout_ms, &m_event_response); + m_require_stable_offsets, + m_timeout_ms, &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } @@ -1566,8 +1566,8 @@ AdminClientDeleteRecords::~AdminClientDeleteRecords() { void AdminClientDeleteRecords::Execute() { Baton b = m_client->DeleteRecords(m_del_records, m_del_records_cnt, - m_operation_timeout_ms, m_timeout_ms, - &m_event_response); + m_operation_timeout_ms, m_timeout_ms, + &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } @@ -1597,7 +1597,7 @@ void AdminClientDeleteRecords::OnError() { /** * @brief Describe Topics in an asynchronous worker - * + * * This callback will describe topics. */ AdminClientDescribeTopics::AdminClientDescribeTopics( @@ -1623,7 +1623,7 @@ AdminClientDescribeTopics::~AdminClientDescribeTopics() { void AdminClientDescribeTopics::Execute() { Baton b = m_client->DescribeTopics(m_topics, m_include_authorized_operations, - m_timeout_ms, &m_event_response); + m_timeout_ms, &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } @@ -1652,7 +1652,7 @@ void AdminClientDescribeTopics::OnError() { /** * @brief ListOffsets in an asynchronous worker - * + * * This callback will list requested offsets for the specified topic partitions. 
*/ AdminClientListOffsets::AdminClientListOffsets( @@ -1677,7 +1677,7 @@ AdminClientListOffsets::~AdminClientListOffsets() { void AdminClientListOffsets::Execute() { Baton b = m_client->ListOffsets(m_partitions, m_timeout_ms, m_isolation_level, - &m_event_response); + &m_event_response); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); } diff --git a/src/workers.h b/src/workers.h index 09699618..0194e68e 100644 --- a/src/workers.h +++ b/src/workers.h @@ -532,8 +532,8 @@ class AdminClientCreatePartitions : public ErrorAwareWorker { class AdminClientListGroups : public ErrorAwareWorker { public: AdminClientListGroups(Napi::FunctionReference *, NodeKafka::AdminClient *, bool, - std::vector &, - const int &); + std::vector &, + const int &); ~AdminClientListGroups(); void Execute(); @@ -554,7 +554,7 @@ class AdminClientListGroups : public ErrorAwareWorker { class AdminClientDescribeGroups : public ErrorAwareWorker { public: AdminClientDescribeGroups(Napi::FunctionReference *, NodeKafka::AdminClient *, - std::vector &, bool, const int &); + std::vector &, bool, const int &); ~AdminClientDescribeGroups(); void Execute(); @@ -575,7 +575,7 @@ class AdminClientDescribeGroups : public ErrorAwareWorker { class AdminClientDeleteGroups : public ErrorAwareWorker { public: AdminClientDeleteGroups(Napi::FunctionReference *, NodeKafka::AdminClient *, - rd_kafka_DeleteGroup_t **, size_t, const int &); + rd_kafka_DeleteGroup_t **, size_t, const int &); ~AdminClientDeleteGroups(); void Execute(); @@ -596,8 +596,8 @@ class AdminClientDeleteGroups : public ErrorAwareWorker { class AdminClientListConsumerGroupOffsets : public ErrorAwareWorker { public: AdminClientListConsumerGroupOffsets(Napi::FunctionReference *, NodeKafka::AdminClient *, - rd_kafka_ListConsumerGroupOffsets_t **, size_t, bool, - const int &); + rd_kafka_ListConsumerGroupOffsets_t **, size_t, bool, + const int &); ~AdminClientListConsumerGroupOffsets(); void Execute(); @@ -619,8 +619,8 @@ class AdminClientListConsumerGroupOffsets : public ErrorAwareWorker { class AdminClientDeleteRecords : public ErrorAwareWorker { public: AdminClientDeleteRecords(Napi::FunctionReference *, NodeKafka::AdminClient *, - rd_kafka_DeleteRecords_t **, size_t, const int &, - const int &); + rd_kafka_DeleteRecords_t **, size_t, const int &, + const int &); ~AdminClientDeleteRecords(); void Execute(); @@ -642,8 +642,8 @@ class AdminClientDeleteRecords : public ErrorAwareWorker { class AdminClientDescribeTopics : public ErrorAwareWorker { public: AdminClientDescribeTopics(Napi::FunctionReference *, NodeKafka::AdminClient *, - rd_kafka_TopicCollection_t *, const bool, - const int &); + rd_kafka_TopicCollection_t *, const bool, + const int &); ~AdminClientDescribeTopics(); void Execute(); @@ -664,8 +664,8 @@ class AdminClientDescribeTopics : public ErrorAwareWorker { class AdminClientListOffsets : public ErrorAwareWorker { public: AdminClientListOffsets(Napi::FunctionReference *, NodeKafka::AdminClient *, - rd_kafka_topic_partition_list_t *, const int &, - rd_kafka_IsolationLevel_t); + rd_kafka_topic_partition_list_t *, const int &, + rd_kafka_IsolationLevel_t); ~AdminClientListOffsets(); void Execute();
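For context, the binding methods whose constructors are re-wrapped in these workers.h hunks queue such workers in the usual node-addon-api way. A hedged sketch, reusing the hypothetical ExampleConsumeWorker from the earlier example (names are not from this patch):

// Illustrative only: queueing an AsyncWorker from a binding method.
Napi::Value NodeExampleConsume(const Napi::CallbackInfo& info) {
  Napi::Env env = info.Env();
  if (info.Length() < 2 || !info[1].IsFunction()) {
    Napi::Error::New(env, "Need a timeout and a callback")
        .ThrowAsJavaScriptException();
    return env.Null();
  }
  int timeout_ms = info[0].As<Napi::Number>().Int32Value();
  Napi::Function cb = info[1].As<Napi::Function>();
  // AsyncWorker instances delete themselves after OnOK()/OnError() run.
  (new ExampleConsumeWorker(cb, timeout_ms))->Queue();
  return env.Null();
}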