Commit 74fb35c

fix: don't block a node process from exiting (#157)
* fix: don't block a node process from exiting
* fix: respect `logLevel` and `logger` params when using `"lastBuild"`
* fix: print logs on the same event loop cycle
1 parent d4a39f5 commit 74fb35c
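The first fix boils down to unref'ing the addon's thread-safe logger callback so it no longer counts as an active handle that keeps the event loop alive. As a plain-Node analogy (a minimal sketch, not code from this commit; `keepAlive` is an illustrative name), the same ref/unref semantics apply to any handle:

```ts
// Analogy only: a ref'd handle (here, a timer) keeps the Node process alive
// even after the main script has finished running.
const keepAlive = setInterval(() => {
    // stand-in for a native resource waiting for work, e.g. queued log messages
}, 1_000);

// Unref'ing tells Node not to wait for this handle before exiting,
// mirroring the new addonThreadSafeLoggerCallback.Unref(info.Env()) call
// in llama/addon.cpp below.
keepAlive.unref();
```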

File tree: 14 files changed (+184 additions, -100 deletions)
.github/workflows/build.yml

Lines changed: 5 additions & 3 deletions
@@ -243,14 +243,16 @@ jobs:
         run: npm ci

       - name: Build binary
-        run: node ./dist/cli/cli.js build
+        run: node ./dist/cli/cli.js build --noUsageExample

       - name: Run standalone tests
         run: npm run test:standalone

   model-dependent-tests:
     name: Model dependent tests
-    runs-on: macos-13
+    runs-on: macos-14
+    env:
+      NODE_LLAMA_CPP_METAL: false
     needs:
       - build
     steps:
@@ -280,7 +282,7 @@ jobs:
         run: npm ci

       - name: Build binary
-        run: node ./dist/cli/cli.js build
+        run: node ./dist/cli/cli.js build --noUsageExample

       - name: Cache models
         id: cache-test-models

llama/addon.cpp

Lines changed: 67 additions & 25 deletions
@@ -9,16 +9,28 @@
 #include "llama.h"
 #include "napi.h"

-Napi::FunctionReference loggerCallbackFunctionReference;
-Napi::ThreadSafeFunction threadSafeLoggerCallback;
-bool loggerCallbackSet = false;
-int addonLoggerLogLevel = 5;
-
 struct addon_logger_log {
     const int logLevelNumber;
     const std::stringstream* stringStream;
 };

+using AddonThreadSafeLogCallbackFunctionContext = Napi::Reference<Napi::Value>;
+void addonCallJsLogCallback(
+    Napi::Env env,
+    Napi::Function callback,
+    AddonThreadSafeLogCallbackFunctionContext *context,
+    addon_logger_log* data
+);
+using AddonThreadSafeLogCallbackFunction = Napi::TypedThreadSafeFunction<
+    AddonThreadSafeLogCallbackFunctionContext,
+    addon_logger_log,
+    addonCallJsLogCallback
+>;
+
+AddonThreadSafeLogCallbackFunction addonThreadSafeLoggerCallback;
+bool addonJsLoggerCallbackSet = false;
+int addonLoggerLogLevel = 5;
+
 std::string addon_model_token_to_piece(const struct llama_model * model, llama_token token) {
     std::vector<char> result(8, 0);
     const int n_tokens = llama_token_to_piece(model, token, result.data(), result.size());
@@ -820,14 +832,31 @@ int addonGetGgmlLogLevelNumber(ggml_log_level level) {
     return 1;
 }

-static void addonWrapperJSLogCallback(Napi::Env env, Napi::Function jsCallback, addon_logger_log* data) {
-    auto status = jsCallback.Call({
-        Napi::Number::New(env, data->logLevelNumber),
-        Napi::String::New(env, data->stringStream->str())
-    });
+void addonCallJsLogCallback(
+    Napi::Env env,
+    Napi::Function callback,
+    AddonThreadSafeLogCallbackFunctionContext * context,
+    addon_logger_log * data
+) {
+    if (env != nullptr && callback != nullptr) {
+        callback.Call({
+            Napi::Number::New(env, data->logLevelNumber),
+            Napi::String::New(env, data->stringStream->str())
+        });
+    } else if (data != nullptr) {
+        if (data->logLevelNumber == 2) {
+            fputs(data->stringStream->str().c_str(), stderr);
+            fflush(stderr);
+        } else {
+            fputs(data->stringStream->str().c_str(), stdout);
+            fflush(stdout);
+        }
+    }

-    delete data->stringStream;
-    delete data;
+    if (data != nullptr) {
+        delete data->stringStream;
+        delete data;
+    }
 }

 static void addonLlamaCppLogCallback(ggml_log_level level, const char * text, void * user_data) {
@@ -837,7 +866,7 @@ static void addonLlamaCppLogCallback(ggml_log_level level, const char * text, void * user_data) {
         return;
     }

-    if (loggerCallbackSet) {
+    if (addonJsLoggerCallbackSet) {
         std::stringstream* stringStream = new std::stringstream();
         if (text != nullptr) {
             *stringStream << text;
@@ -848,14 +877,14 @@ static void addonLlamaCppLogCallback(ggml_log_level level, const char * text, void * user_data) {
             stringStream
         };

-        auto status = threadSafeLoggerCallback.NonBlockingCall(data, addonWrapperJSLogCallback);
+        auto status = addonThreadSafeLoggerCallback.NonBlockingCall(data);

         if (status == napi_ok) {
             return;
         }
     }

-    if (level == GGML_LOG_LEVEL_ERROR) {
+    if (level == 2) {
         fputs(text, stderr);
         fflush(stderr);
     } else {
@@ -866,28 +895,41 @@ static void addonLlamaCppLogCallback(ggml_log_level level, const char * text, void * user_data) {

 Napi::Value setLogger(const Napi::CallbackInfo& info) {
     if (info.Length() < 1 || !info[0].IsFunction()) {
-        if (loggerCallbackSet) {
-            threadSafeLoggerCallback.Release();
-            threadSafeLoggerCallback = nullptr;
-            loggerCallbackFunctionReference.Unref();
-            loggerCallbackSet = false;
+        if (addonJsLoggerCallbackSet) {
+            addonJsLoggerCallbackSet = false;
+            addonThreadSafeLoggerCallback.Release();
         }

         return info.Env().Undefined();
     }

-    loggerCallbackFunctionReference = Napi::Persistent(info[0].As<Napi::Function>());
-    loggerCallbackFunctionReference.Ref();
-    threadSafeLoggerCallback = Napi::ThreadSafeFunction::New(info.Env(), loggerCallbackFunctionReference.Value(), "loggerCallback", 0, 1);
-    loggerCallbackSet = true;
+    auto addonLoggerJSCallback = info[0].As<Napi::Function>();
+    AddonThreadSafeLogCallbackFunctionContext *context = new Napi::Reference<Napi::Value>(Napi::Persistent(info.This()));
+    addonThreadSafeLoggerCallback = AddonThreadSafeLogCallbackFunction::New(
+        info.Env(),
+        addonLoggerJSCallback,
+        "loggerCallback",
+        0,
+        1,
+        context,
+        []( Napi::Env, void *, AddonThreadSafeLogCallbackFunctionContext *ctx ) {
+            addonJsLoggerCallbackSet = false;
+
+            delete ctx;
+        }
+    );
+    addonJsLoggerCallbackSet = true;
+
+    // prevent blocking the main node process from exiting due to active resources
+    addonThreadSafeLoggerCallback.Unref(info.Env());

     return info.Env().Undefined();
 }

 Napi::Value setLoggerLogLevel(const Napi::CallbackInfo& info) {
     if (info.Length() < 1 || !info[0].IsNumber()) {
         addonLoggerLogLevel = 5;
-
+
         return info.Env().Undefined();
     }

src/bindings/Llama.ts

Lines changed: 37 additions & 34 deletions
@@ -30,12 +30,11 @@ export class Llama {
     /** @internal */ private _logLevel: LlamaLogLevel;
     /** @internal */ private _pendingLog: string | null = null;
     /** @internal */ private _pendingLogLevel: LlamaLogLevel | null = null;
-    /** @internal */ private _logDispatchTimeout: ReturnType<typeof setTimeout> | undefined = undefined;
+    /** @internal */ private _logDispatchQueuedMicrotasks: number = 0;
     /** @internal */ private _previousLog: string | null = null;
     /** @internal */ private _previousLogLevel: LlamaLogLevel | null = null;
     /** @internal */ private _nextLogNeedNewLine: boolean = false;

-
     private constructor({
         bindings, metal, cuda, logLevel, logger, buildType, cmakeOptions, llamaCppRelease
     }: {
@@ -63,38 +62,13 @@ export class Llama {
             release: llamaCppRelease.release
         });

-        this._dispatchPendingLog = this._dispatchPendingLog.bind(this);
+        this._dispatchPendingLogMicrotask = this._dispatchPendingLogMicrotask.bind(this);
         this._onAddonLog = this._onAddonLog.bind(this);

         this._bindings.setLogger(this._onAddonLog);
         this._bindings.setLoggerLogLevel(LlamaLogLevelToAddonLogLevel.get(this._logLevel) ?? defaultLogLevel);
     }

-    /** @internal */
-    public static async _create({
-        bindings, buildType, buildMetadata, logLevel, logger
-    }: {
-        bindings: BindingModule
-        buildType: "localBuild" | "prebuilt",
-        buildMetadata: BuildMetadataFile,
-        logLevel: LlamaLogLevel,
-        logger: (level: LlamaLogLevel, message: string) => void,
-    }) {
-        return new Llama({
-            bindings,
-            buildType,
-            metal: buildMetadata.buildOptions.computeLayers.metal,
-            cuda: buildMetadata.buildOptions.computeLayers.cuda,
-            cmakeOptions: buildMetadata.buildOptions.customCmakeOptions,
-            llamaCppRelease: {
-                repo: buildMetadata.buildOptions.llamaCpp.repo,
-                release: buildMetadata.buildOptions.llamaCpp.release
-            },
-            logLevel,
-            logger
-        });
-    }
-
     public get metal() {
         return this._metal;
     }
@@ -142,11 +116,10 @@ export class Llama {
         return this._bindings.systemInfo();
     }

+    /** @internal */
     private _onAddonLog(level: number, message: string) {
         const llamaLogLevel = addonLogLevelToLlamaLogLevel.get(level) ?? LlamaLogLevel.fatal;

-        clearTimeout(this._logDispatchTimeout);
-
         if (this._pendingLog != null && this._pendingLogLevel != null && this._pendingLogLevel != llamaLogLevel) {
             this._callLogger(this._pendingLogLevel, this._pendingLog);
             this._pendingLog = null;
@@ -168,21 +141,26 @@ export class Llama {
         if (nextLog !== "") {
             this._pendingLog = nextLog;
             this._pendingLogLevel = llamaLogLevel;
-            clearTimeout(this._logDispatchTimeout);
-            this._logDispatchTimeout = setTimeout(this._dispatchPendingLog, 0);
+
+            queueMicrotask(this._dispatchPendingLogMicrotask);
+            this._logDispatchQueuedMicrotasks++;
         } else
             this._pendingLog = null;
     }

-    private _dispatchPendingLog() {
-        clearTimeout(this._logDispatchTimeout);
+    /** @internal */
+    private _dispatchPendingLogMicrotask() {
+        this._logDispatchQueuedMicrotasks--;
+        if (this._logDispatchQueuedMicrotasks !== 0)
+            return;

         if (this._pendingLog != null && this._pendingLogLevel != null) {
             this._callLogger(this._pendingLogLevel, this._pendingLog);
             this._pendingLog = null;
         }
     }

+    /** @internal */
     private _callLogger(level: LlamaLogLevel, message: string) {
         // llama.cpp uses dots to indicate progress, so we don't want to print them as different lines,
         // and instead, append to the same log line
@@ -210,6 +188,31 @@ export class Llama {
         this._previousLogLevel = level;
     }

+    /** @internal */
+    public static async _create({
+        bindings, buildType, buildMetadata, logLevel, logger
+    }: {
+        bindings: BindingModule
+        buildType: "localBuild" | "prebuilt",
+        buildMetadata: BuildMetadataFile,
+        logLevel: LlamaLogLevel,
+        logger: (level: LlamaLogLevel, message: string) => void,
+    }) {
+        return new Llama({
+            bindings,
+            buildType,
+            metal: buildMetadata.buildOptions.computeLayers.metal,
+            cuda: buildMetadata.buildOptions.computeLayers.cuda,
+            cmakeOptions: buildMetadata.buildOptions.customCmakeOptions,
+            llamaCppRelease: {
+                repo: buildMetadata.buildOptions.llamaCpp.repo,
+                release: buildMetadata.buildOptions.llamaCpp.release
+            },
+            logLevel,
+            logger
+        });
+    }
+
     public static defaultConsoleLogger(level: LlamaLogLevel, message: string) {
         switch (level) {
             case LlamaLogLevel.disabled:
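The change above replaces the `setTimeout(..., 0)`-based flush with `queueMicrotask` plus a counter of queued microtasks, so buffered llama.cpp output is printed within the same event loop cycle that produced it, while consecutive chunks still coalesce into a single logger call. A simplified standalone sketch of that pattern (the `LogBatcher` class and its member names are illustrative, not part of node-llama-cpp):

```ts
// Simplified sketch of the microtask-based log coalescing used above.
// `LogBatcher` and its members are illustrative names, not the library's API.
class LogBatcher {
    private pending: string | null = null;
    private queuedMicrotasks = 0;

    public constructor(private readonly flush: (text: string) => void) {
        this.dispatch = this.dispatch.bind(this);
    }

    public push(chunk: string) {
        this.pending = (this.pending ?? "") + chunk;

        // schedule a flush on the current event loop cycle;
        // the counter lets later pushes supersede earlier scheduled flushes
        queueMicrotask(this.dispatch);
        this.queuedMicrotasks++;
    }

    private dispatch() {
        this.queuedMicrotasks--;
        if (this.queuedMicrotasks !== 0)
            return; // a newer microtask is still queued; let it do the flush

        if (this.pending != null) {
            this.flush(this.pending);
            this.pending = null;
        }
    }
}

// usage: consecutive pushes in the same cycle produce a single flushed line
const batcher = new LogBatcher((text) => console.log(text));
batcher.push(".");
batcher.push(".");
batcher.push("."); // flushed once as "..." before any timers or I/O callbacks run
```

The counter is what makes the coalescing work: every `push` queues its own microtask, but only the last one queued in a cycle reaches the flush, so earlier ones return without printing a partial log line.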
