Skip to content

Commit 8da7f61

Browse files
committed
context : improve llama_context encapsulation
ggml-ci
1 parent b52b79b commit 8da7f61

File tree

5 files changed

+327
-157
lines changed

5 files changed

+327
-157
lines changed

src/llama-adapter.cpp

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -91,7 +91,7 @@ bool llama_adapter_cvec::init(const llama_model & model) {
9191
return true;
9292
}
9393

94-
int32_t llama_adapter_cvec::apply(
94+
bool llama_adapter_cvec::apply(
9595
const llama_model & model,
9696
const float * data,
9797
size_t len,
@@ -104,17 +104,17 @@ int32_t llama_adapter_cvec::apply(
104104
// disable the current control vector (but leave allocated for later)
105105
layer_start = -1;
106106
layer_end = -1;
107-
return 0;
107+
return true;
108108
}
109109

110110
if (n_embd != (int) hparams.n_embd) {
111111
LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
112-
return 1;
112+
return false;
113113
}
114114

115115
if (tensors.empty()) {
116116
if (!init(model)) {
117-
return 1;
117+
return false;
118118
}
119119
}
120120

@@ -130,7 +130,7 @@ int32_t llama_adapter_cvec::apply(
130130
}
131131
}
132132

133-
return 0;
133+
return true;
134134
}
135135

136136
// lora

src/llama-adapter.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -19,7 +19,7 @@ struct llama_adapter_cvec {
1919

2020
struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const;
2121

22-
int32_t apply(
22+
bool apply(
2323
const llama_model & model,
2424
const float * data,
2525
size_t len,

0 commit comments

Comments (0)