1 parent 37556bf, commit d9a1d90
llama_cpp/llama.py
@@ -850,7 +850,7 @@ def __init__(
         ) # 0x7FFFFFFF is INT32 max, will be auto set to all layers
         self.model_params.main_gpu = main_gpu
         self.tensor_split = tensor_split
-        self._p_tensor_split = None
+        self._c_tensor_split = None
         if self.tensor_split is not None:
             if len(self.tensor_split) > llama_cpp.LLAMA_MAX_DEVICES:
                 raise ValueError(
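For context, the renamed `_c_tensor_split` attribute suggests the instance caches a C (ctypes) array built from the Python `tensor_split` list. The sketch below is illustrative only, not the code from this commit: it assumes `llama_cpp.LLAMA_MAX_DEVICES` (already referenced in the diff) and uses a hypothetical helper name `make_c_tensor_split`.

import ctypes

import llama_cpp  # assumes the llama-cpp-python bindings are installed


def make_c_tensor_split(tensor_split):
    """Illustrative helper: convert a list of per-device split fractions
    into a fixed-size ctypes float array of length LLAMA_MAX_DEVICES."""
    if len(tensor_split) > llama_cpp.LLAMA_MAX_DEVICES:
        raise ValueError(
            "tensor_split has more entries than supported devices: "
            f"LLAMA_MAX_DEVICES={llama_cpp.LLAMA_MAX_DEVICES}"
        )
    # Unused slots default to 0.0. The caller should keep a reference to the
    # returned array (e.g. as self._c_tensor_split) so it is not garbage
    # collected while native code may still hold a pointer to it.
    FloatArray = ctypes.c_float * llama_cpp.LLAMA_MAX_DEVICES
    return FloatArray(*tensor_split)

Keeping the array on the instance (rather than as a local) is the usual reason for a private attribute like this: the ctypes buffer must outlive the call that hands its pointer to the C side.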