
Commit e24c3d7

Prefer explicit imports
1 parent 4050143 commit e24c3d7


llama_cpp/llama_cpp.py

Lines changed: 14 additions & 14 deletions
@@ -68,11 +68,11 @@ def _load_shared_library(lib_base_name: str):
 _lib = _load_shared_library(_lib_base_name)
 
 # C types
-LLAMA_FILE_VERSION = ctypes.c_int(1)
+LLAMA_FILE_VERSION = c_int(1)
 LLAMA_FILE_MAGIC = b"ggjt"
 LLAMA_FILE_MAGIC_UNVERSIONED = b"ggml"
 LLAMA_SESSION_MAGIC = b"ggsn"
-LLAMA_SESSION_VERSION = ctypes.c_int(1)
+LLAMA_SESSION_VERSION = c_int(1)
 
 llama_context_p = c_void_p
 
@@ -128,18 +128,18 @@ class llama_context_params(Structure):
 
 llama_context_params_p = POINTER(llama_context_params)
 
-LLAMA_FTYPE_ALL_F32 = ctypes.c_int(0)
-LLAMA_FTYPE_MOSTLY_F16 = ctypes.c_int(1) # except 1d tensors
-LLAMA_FTYPE_MOSTLY_Q4_0 = ctypes.c_int(2) # except 1d tensors
-LLAMA_FTYPE_MOSTLY_Q4_1 = ctypes.c_int(3) # except 1d tensors
-LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = ctypes.c_int(
+LLAMA_FTYPE_ALL_F32 = c_int(0)
+LLAMA_FTYPE_MOSTLY_F16 = c_int(1) # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q4_0 = c_int(2) # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q4_1 = c_int(3) # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = c_int(
     4
 ) # tok_embeddings.weight and output.weight are F16
-LLAMA_FTYPE_MOSTLY_Q4_2 = ctypes.c_int(5) # except 1d tensors
-# LLAMA_FTYPE_MOSTYL_Q4_3 = ctypes.c_int(6) # except 1d tensors
-LLAMA_FTYPE_MOSTLY_Q8_0 = ctypes.c_int(7) # except 1d tensors
-LLAMA_FTYPE_MOSTLY_Q5_0 = ctypes.c_int(8) # except 1d tensors
-LLAMA_FTYPE_MOSTLY_Q5_1 = ctypes.c_int(9) # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q4_2 = c_int(5) # except 1d tensors
+# LLAMA_FTYPE_MOSTYL_Q4_3 = c_int(6) # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q8_0 = c_int(7) # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q5_0 = c_int(8) # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q5_1 = c_int(9) # except 1d tensors
 
 # Misc
 c_float_p = POINTER(c_float)
@@ -216,8 +216,8 @@ def llama_model_quantize(
 # Returns 0 on success
 def llama_apply_lora_from_file(
     ctx: llama_context_p,
-    path_lora: ctypes.c_char_p,
-    path_base_model: ctypes.c_char_p,
+    path_lora: c_char_p,
+    path_base_model: c_char_p,
     n_threads: c_int,
 ) -> c_int:
     return _lib.llama_apply_lora_from_file(ctx, path_lora, path_base_model, n_threads)
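
What "prefer explicit imports" amounts to, sketched under the assumption that the module already does a from-import of the ctypes names it uses: with those names bound at module top level, the constants and function signatures can drop the ctypes. prefix, which is the only change this commit makes. Only fragments of the real llama_cpp/llama_cpp.py are visible in this diff, so the following is an illustrative stand-in, not the actual module.

# Illustrative sketch only: approximates the style this commit moves toward;
# it is not the real llama_cpp/llama_cpp.py and does not load libllama.
from ctypes import POINTER, Structure, c_char_p, c_float, c_int, c_void_p

# With the explicit from-import above, ctypes.c_int(...) becomes c_int(...).
LLAMA_FILE_VERSION = c_int(1)
LLAMA_FILE_MAGIC = b"ggjt"
LLAMA_SESSION_MAGIC = b"ggsn"
LLAMA_SESSION_VERSION = c_int(1)

llama_context_p = c_void_p
c_float_p = POINTER(c_float)


class llama_context_params(Structure):
    _fields_ = []  # fields omitted in this sketch


def llama_apply_lora_from_file(
    ctx: llama_context_p,
    path_lora: c_char_p,        # was ctypes.c_char_p before this commit
    path_base_model: c_char_p,  # was ctypes.c_char_p before this commit
    n_threads: c_int,
) -> c_int:
    # The real function forwards to the shared library; returning 0 keeps
    # this sketch runnable without the native llama library installed.
    return c_int(0)

The diff above is just that mechanical substitution applied to the file-type constants and the llama_apply_lora_from_file signature; behavior is unchanged.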
