
Commit 001b67f

Fix enum variable reference issue in mtmd; replace kv_cache_clear() with memory_clear()
Thanks to @flamingrickpat
1 parent a617e31 commit 001b67f
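
The same two corrections are applied at every touched call site: the removed kv_cache_clear() call becomes memory_clear(True), and the MTMD_INPUT_CHUNK_TYPE_* constants are referenced through the mtmd_input_chunk_type enum rather than as module-level attributes. The sketch below only illustrates those two patterns; the helper name is made up for this note, llama stands for a llama_cpp.Llama instance, and mtmd_cpp plays the role of the handler's self._mtmd_cpp ctypes bindings.

# Illustrative sketch of the corrected call patterns, not library code.
# Assumptions: `llama` is a llama_cpp.Llama instance, `mtmd_cpp` stands in
# for self._mtmd_cpp from llama_chat_format.py, and `chunk` is an mtmd
# input chunk pointer obtained from those bindings.
def reset_context_and_is_text_chunk(llama, mtmd_cpp, chunk):
    # kv_cache_clear() is gone from the low-level context wrapper;
    # memory_clear(True) replaces it (the flag presumably mirrors the
    # `data` argument of llama.cpp's llama_memory_clear).
    llama.reset()
    llama._ctx.memory_clear(True)

    # The chunk-type constants live on the mtmd_input_chunk_type enum, so
    # they must be referenced through the enum class rather than as
    # attributes of the bindings module.
    chunk_type = mtmd_cpp.mtmd_input_chunk_get_type(chunk)
    return chunk_type == mtmd_cpp.mtmd_input_chunk_type.MTMD_INPUT_CHUNK_TYPE_TEXT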

File tree

1 file changed (+4 -4 lines changed)

llama_cpp/llama_chat_format.py

Lines changed: 4 additions & 4 deletions
@@ -2965,7 +2965,7 @@ def __call__(
 
         # Reset llama context
         llama.reset()
-        llama._ctx.kv_cache_clear()
+        llama._ctx.memory_clear(True)
 
         # Process each chunk
         n_past = llama_cpp.llama_pos(0)
@@ -2978,7 +2978,7 @@ def __call__(
 
             chunk_type = self._mtmd_cpp.mtmd_input_chunk_get_type(chunk)
 
-            if chunk_type == self._mtmd_cpp.MTMD_INPUT_CHUNK_TYPE_TEXT:
+            if chunk_type == self._mtmd_cpp.mtmd_input_chunk_type.MTMD_INPUT_CHUNK_TYPE_TEXT:
                 # Handle text chunk
                 n_tokens_out = ctypes.c_size_t()
                 tokens_ptr = self._mtmd_cpp.mtmd_input_chunk_get_tokens_text(
@@ -2995,7 +2995,7 @@ def __call__(
                 )
                 llama.eval(tokens)
 
-            elif chunk_type in [self._mtmd_cpp.MTMD_INPUT_CHUNK_TYPE_IMAGE, self._mtmd_cpp.MTMD_INPUT_CHUNK_TYPE_AUDIO]:
+            elif chunk_type in [self._mtmd_cpp.mtmd_input_chunk_type.MTMD_INPUT_CHUNK_TYPE_IMAGE, self._mtmd_cpp.mtmd_input_chunk_type.MTMD_INPUT_CHUNK_TYPE_AUDIO]:
                 # Handle image/audio chunk using helper
                 chunk_n_tokens = self._mtmd_cpp.mtmd_input_chunk_get_n_tokens(chunk)
 
@@ -3595,7 +3595,7 @@ def __call__(self, **kwargs):
 
         # Clear state for multiple runs
         llama.reset()
-        llama._ctx.kv_cache_clear()
+        llama._ctx.memory_clear(True)
         llama.n_tokens = 0
 
         if hasattr(llama, 'input_ids'):

0 commit comments
