1 parent 3fbc063 commit 314ce7d
llama_cpp/llama.py
@@ -74,7 +74,7 @@ def __init__(
         self.tokens_consumed = 0
         self.n_batch = min(n_ctx, n_batch)
 
-        self.n_threads = n_threads or multiprocessing.cpu_count()
+        self.n_threads = n_threads or max(multiprocessing.cpu_count() // 2, 1)
 
         if not os.path.exists(model_path):
             raise ValueError(f"Model path does not exist: {model_path}")