Skip to content

Commit 2d98736

Browse files
committed
Update PyLlama.cpp to reflect changes in llama.cpp
1 parent 979eb82 commit 2d98736

File tree

1 file changed: +7 additions, −23 deletions

src/PyLlama.cpp

Lines changed: 7 additions & 23 deletions
(diff columns: original file line number · new file line number · change)
@@ -74,6 +74,9 @@ class PyLLAMA {
         return llama_is_anti_prompt_present(*ctx_ptr, antiprompt_inp);
     }
     }
+    void reset_remaining_tokens() {
+        llama_reset_remaining_tokens(*ctx_ptr);
+    }
     void print_startup_stats() {
         llama_print_startup_stats(*ctx_ptr);
     }
@@ -85,8 +88,6 @@ class PyLLAMA {
 // Write python bindings for gpt_params
 gpt_params init_params(
     const std::string& model,
-    const std::string& prompt,
-    const std::string& antiprompt,
     int32_t n_ctx,
     int32_t n_predict,
     int32_t top_k,
@@ -96,15 +97,10 @@ gpt_params init_params(
     int32_t seed,
     int32_t n_threads,
     int32_t repeat_last_n,
-    int32_t n_batch,
-    bool use_color,
-    bool interactive,
-    bool interactive_start
+    int32_t n_batch
 ) {
     gpt_params params{};
     params.model = model;
-    params.prompt = prompt;
-    params.antiprompt = antiprompt;
     params.n_predict = n_predict;
     params.n_ctx = n_ctx;
     params.top_k = top_k;
@@ -115,9 +111,6 @@ gpt_params init_params(
     params.n_threads = n_threads;
     params.repeat_last_n = repeat_last_n;
     params.n_batch = n_batch;
-    params.use_color = use_color;
-    params.interactive = interactive;
-    params.interactive_start = interactive_start;
     return params;
 }
 
@@ -127,8 +120,6 @@ PYBIND11_MODULE(llamacpp, m) {
     py::class_<gpt_params>(m, "gpt_params")
         .def(py::init<>(&init_params), "Initialize gpt_params",
         py::arg("model"),
-        py::arg("prompt"),
-        py::arg("antiprompt"),
         py::arg("n_ctx"),
         py::arg("n_predict"),
         py::arg("top_k"),
@@ -138,13 +129,8 @@ PYBIND11_MODULE(llamacpp, m) {
         py::arg("seed"),
         py::arg("n_threads"),
         py::arg("repeat_last_n"),
-        py::arg("n_batch"),
-        py::arg("use_color"),
-        py::arg("interactive"),
-        py::arg("interactive_start"))
+        py::arg("n_batch"))
         .def_readwrite("model", &gpt_params::model)
-        .def_readwrite("prompt", &gpt_params::prompt)
-        .def_readwrite("antiprompt", &gpt_params::antiprompt)
         .def_readwrite("n_predict", &gpt_params::n_predict)
         .def_readwrite("n_ctx", &gpt_params::n_ctx)
         .def_readwrite("top_k", &gpt_params::top_k)
@@ -154,10 +140,7 @@ PYBIND11_MODULE(llamacpp, m) {
         .def_readwrite("seed", &gpt_params::seed)
         .def_readwrite("n_threads", &gpt_params::n_threads)
         .def_readwrite("repeat_last_n", &gpt_params::repeat_last_n)
-        .def_readwrite("n_batch", &gpt_params::n_batch)
-        .def_readwrite("use_color", &gpt_params::use_color)
-        .def_readwrite("interactive", &gpt_params::interactive)
-        .def_readwrite("interactive_start", &gpt_params::interactive_start);
+        .def_readwrite("n_batch", &gpt_params::n_batch);
 
     py::class_<PyLLAMA>(m, "PyLLAMA")
         .def(py::init<gpt_params>())
@@ -194,6 +177,7 @@ PYBIND11_MODULE(llamacpp, m) {
         .def("is_antiprompt_present", &PyLLAMA::is_antiprompt_present, "Check if antiprompt is present")
         .def("print_startup_stats", &PyLLAMA::print_startup_stats, "Print startup stats")
         .def("print_end_stats", &PyLLAMA::print_end_stats, "Print end stats")
+        .def("reset_remaining_tokens", &PyLLAMA::reset_remaining_tokens, "Reset remaining tokens")
         .def_property("antiprompt", &PyLLAMA::get_antiprompt, &PyLLAMA::set_antiprompt, "Antiprompt")
         ;
 
Comments (0)