We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
2 parents 798e1d0 + 8817777, commit 0422152 (copy full SHA for 0422152)
ktransformers/util/custom_gguf.py
@@ -27,6 +27,7 @@
27
import KTransformersOps
28
from .custom_loader import SafeTensorLoader
29
import ctypes
30
+import math
31
32
class GGMLQuantizationType(IntEnum):
33
F32 = 0
@@ -230,7 +231,7 @@ def load_gguf(self, f):
230
231
shape = [read_value(f, DATA_TYPES["uint64"]) for _ in range(shape_len)]
232
ggml_type = read_value(f, DATA_TYPES["uint32"])
233
bad_offset = read_value(f, DATA_TYPES["uint64"])
- n_elems = int(np.prod(shape))
234
+ n_elems = int(math.prod(shape))
235
block_size, type_size = GGML_QUANT_SIZES[ggml_type]
236
n_bytes = n_elems * type_size // block_size
237
np_dims = tuple(reversed(shape))
0 commit comments