Commit 3a3c20d

fix: add helpful messages about solutions to CUDA issues
1 parent f6442ab commit 3a3c20d

4 files changed: +32 −3 lines changed

docs/guide/CUDA.md

Lines changed: 5 additions & 0 deletions
@@ -156,6 +156,11 @@ ldd ./node_modules/@node-llama-cpp/linux-x64-cuda/bins/linux-x64-cuda/libggml-cu
 
 ::::
 
+### Fix the `ggml_cuda_init: failed to initialize CUDA: (null)` Error {#fix-failed-to-initialize-cuda-null}
+This error usually happens when the NVIDIA drivers installed on your machine are incompatible with the version of CUDA you have installed.
+
+To fix it, update your NVIDIA drivers to the latest version from the [NVIDIA Driver Downloads](https://www.nvidia.com/en-us/drivers/) page.
+
 
 ## Using `node-llama-cpp` With CUDA
 It's recommended to use [`getLlama`](../api/functions/getLlama) without specifying a GPU type,
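A quick way to confirm the driver update worked is to request the CUDA backend explicitly and see whether it initializes. This is a minimal sketch, not part of the docs change above; `getLlama` and its `gpu` option are the library's public API, while the try/catch and log messages are illustrative:

```ts
// Sketch: after updating the NVIDIA drivers, force the CUDA backend
// to check that initialization no longer fails.
import {getLlama} from "node-llama-cpp";

try {
    const llama = await getLlama({gpu: "cuda"}); // throws if CUDA can't be loaded
    console.log("GPU in use:", llama.gpu); // "cuda"
} catch (err) {
    console.error("CUDA is still unavailable; recheck the driver version.", err);
}
```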

src/bindings/Llama.ts

Lines changed: 8 additions & 1 deletion
@@ -11,7 +11,10 @@ import {LlamaGrammar, LlamaGrammarOptions} from "../evaluator/LlamaGrammar.js";
 import {ThreadsSplitter} from "../utils/ThreadsSplitter.js";
 import {getLlamaClasses, LlamaClasses} from "../utils/getLlamaClasses.js";
 import {BindingModule} from "./AddonTypes.js";
-import {BuildGpu, BuildMetadataFile, LlamaGpuType, LlamaLocks, LlamaLogLevel, LlamaLogLevelGreaterThanOrEqual, LlamaNuma} from "./types.js";
+import {
+    BuildGpu, BuildMetadataFile, LlamaGpuType, LlamaLocks, LlamaLogLevel,
+    LlamaLogLevelGreaterThan, LlamaLogLevelGreaterThanOrEqual, LlamaNuma
+} from "./types.js";
 import {MemoryOrchestrator, MemoryReservation} from "./utils/MemoryOrchestrator.js";
 
 export const LlamaLogLevelToAddonLogLevel: ReadonlyMap<LlamaLogLevel, number> = new Map([
@@ -41,6 +44,7 @@ export class Llama {
     /** @internal */ public readonly _swapOrchestrator: MemoryOrchestrator;
     /** @internal */ public readonly _debug: boolean;
     /** @internal */ public readonly _threadsSplitter: ThreadsSplitter;
+    /** @internal */ public _hadErrorLogs: boolean = false;
     /** @internal */ private readonly _gpu: LlamaGpuType;
     /** @internal */ private readonly _numa: LlamaNuma;
     /** @internal */ private readonly _buildType: "localBuild" | "prebuilt";
@@ -470,6 +474,9 @@ export class Llama {
 
         this._previousLog = message;
         this._previousLogLevel = level;
+
+        if (!this._hadErrorLogs && LlamaLogLevelGreaterThan(level, LlamaLogLevel.error))
+            this._hadErrorLogs = true;
     }
 
     /** @internal */
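For reviewers skimming the hunks: `_hadErrorLogs` is a one-way latch set inside the log hook, so later code can ask "did anything severe get logged?" without replaying the log stream. A self-contained re-creation of the pattern; the names here are hypothetical, and the real code delegates the severity comparison to `LlamaLogLevelGreaterThan`:

```ts
// Hypothetical stand-in: a severity-ordered list, most severe first.
type Level = "fatal" | "error" | "warn" | "info" | "debug";
const levelOrder: readonly Level[] = ["fatal", "error", "warn", "info", "debug"];

class LogWatcher {
    // Latches to true on the first severe message and is never reset.
    public hadErrorLogs = false;

    onLog(level: Level, _message: string): void {
        if (!this.hadErrorLogs && levelOrder.indexOf(level) <= levelOrder.indexOf("error"))
            this.hadErrorLogs = true;
    }
}

const watcher = new LogWatcher();
watcher.onLog("info", "loading model");     // flag stays false
watcher.onLog("error", "cuda init failed"); // flag latches to true
console.log(watcher.hadErrorLogs); // true
```

Note that this sketch latches on "error or worse" (`<=`), whereas the hunk itself uses the strict comparison `LlamaLogLevelGreaterThan(level, LlamaLogLevel.error)`.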

src/bindings/types.ts

Lines changed: 11 additions & 2 deletions
@@ -117,14 +117,23 @@ export const LlamaVocabularyTypeValues = Object.freeze([
 ] as const);
 
 /**
- *Check if a log level is higher than another log level
+ * Check if a log level is higher than another log level
+ * @example
+ * ```ts
+ * LlamaLogLevelGreaterThan(LlamaLogLevel.error, LlamaLogLevel.info); // true
+ * ```
  */
 export function LlamaLogLevelGreaterThan(a: LlamaLogLevel, b: LlamaLogLevel): boolean {
     return LlamaLogLevelValues.indexOf(a) < LlamaLogLevelValues.indexOf(b);
 }
 
 /**
- *Check if a log level is higher than or equal to another log level
+ * Check if a log level is higher than or equal to another log level
+ * @example
+ * ```ts
+ * LlamaLogLevelGreaterThanOrEqual(LlamaLogLevel.error, LlamaLogLevel.info); // true
+ * LlamaLogLevelGreaterThanOrEqual(LlamaLogLevel.error, LlamaLogLevel.error); // true
+ * ```
  */
 export function LlamaLogLevelGreaterThanOrEqual(a: LlamaLogLevel, b: LlamaLogLevel): boolean {
     return LlamaLogLevelValues.indexOf(a) <= LlamaLogLevelValues.indexOf(b);
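The `@example` blocks added here document a slightly counterintuitive implementation: `LlamaLogLevelValues` is ordered from most to least severe, so a smaller index means a higher level, and "greater than" is implemented as `indexOf(a) < indexOf(b)`. A standalone sketch of that ordering (the exact member list of `LlamaLogLevel` is an assumption, not taken from this diff):

```ts
// Severity-ordered values: a smaller index means a more severe level.
enum LlamaLogLevel {
    disabled = "disabled",
    fatal = "fatal",
    error = "error",
    warn = "warn",
    info = "info",
    debug = "debug"
}
const LlamaLogLevelValues = Object.freeze([
    LlamaLogLevel.disabled, LlamaLogLevel.fatal, LlamaLogLevel.error,
    LlamaLogLevel.warn, LlamaLogLevel.info, LlamaLogLevel.debug
] as const);

function LlamaLogLevelGreaterThan(a: LlamaLogLevel, b: LlamaLogLevel): boolean {
    return LlamaLogLevelValues.indexOf(a) < LlamaLogLevelValues.indexOf(b);
}

console.log(LlamaLogLevelGreaterThan(LlamaLogLevel.error, LlamaLogLevel.info)); // true
console.log(LlamaLogLevelGreaterThan(LlamaLogLevel.info, LlamaLogLevel.error)); // false
```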

src/cli/commands/inspect/commands/InspectGpuCommand.ts

Lines changed: 8 additions & 0 deletions
@@ -153,6 +153,9 @@ export const InspectGpuCommand: CommandModule<object, InspectGpuCommand> = {
         } else {
             console.info(`${chalk.yellow("CUDA:")} ${chalk.green("available")}`);
             gpusToLogVramUsageOf.push("cuda");
+
+            if (llama._hadErrorLogs)
+                console.info(chalk.yellow("To resolve errors related to CUDA, see the CUDA guide: ") + documentationPageUrls.CUDA);
         }
     }
 
@@ -165,6 +168,11 @@ export const InspectGpuCommand: CommandModule<object, InspectGpuCommand> = {
         } else {
             console.info(`${chalk.yellow("Vulkan:")} ${chalk.green("available")}`);
             gpusToLogVramUsageOf.push("vulkan");
+
+            if (llama._hadErrorLogs)
+                console.info(
+                    chalk.yellow("To resolve errors related to Vulkan, see the Vulkan guide: ") + documentationPageUrls.Vulkan
+                );
         }
     }
 