@@ -74,6 +74,9 @@ class PyLLAMA {
             return llama_is_anti_prompt_present(*ctx_ptr, antiprompt_inp);
         }
     }
+    void reset_remaining_tokens() {
+        llama_reset_remaining_tokens(*ctx_ptr);
+    }
     void print_startup_stats() {
         llama_print_startup_stats(*ctx_ptr);
     }
@@ -85,8 +88,6 @@ class PyLLAMA {
 // Write python bindings for gpt_params
 gpt_params init_params(
     const std::string& model,
-    const std::string& prompt,
-    const std::string& antiprompt,
     int32_t n_ctx,
     int32_t n_predict,
     int32_t top_k,
@@ -96,15 +97,10 @@ gpt_params init_params(
     int32_t seed,
     int32_t n_threads,
     int32_t repeat_last_n,
-    int32_t n_batch,
-    bool use_color,
-    bool interactive,
-    bool interactive_start
+    int32_t n_batch
 ) {
     gpt_params params{};
     params.model = model;
-    params.prompt = prompt;
-    params.antiprompt = antiprompt;
     params.n_predict = n_predict;
     params.n_ctx = n_ctx;
     params.top_k = top_k;
@@ -115,9 +111,6 @@ gpt_params init_params(
     params.n_threads = n_threads;
     params.repeat_last_n = repeat_last_n;
     params.n_batch = n_batch;
-    params.use_color = use_color;
-    params.interactive = interactive;
-    params.interactive_start = interactive_start;
     return params;
 }
 
@@ -127,8 +120,6 @@ PYBIND11_MODULE(llamacpp, m) {
     py::class_<gpt_params>(m, "gpt_params")
         .def(py::init<>(&init_params), "Initialize gpt_params",
              py::arg("model"),
-             py::arg("prompt"),
-             py::arg("antiprompt"),
              py::arg("n_ctx"),
              py::arg("n_predict"),
              py::arg("top_k"),
@@ -138,13 +129,8 @@ PYBIND11_MODULE(llamacpp, m) {
              py::arg("seed"),
              py::arg("n_threads"),
              py::arg("repeat_last_n"),
-             py::arg("n_batch"),
-             py::arg("use_color"),
-             py::arg("interactive"),
-             py::arg("interactive_start"))
+             py::arg("n_batch"))
         .def_readwrite("model", &gpt_params::model)
-        .def_readwrite("prompt", &gpt_params::prompt)
-        .def_readwrite("antiprompt", &gpt_params::antiprompt)
         .def_readwrite("n_predict", &gpt_params::n_predict)
         .def_readwrite("n_ctx", &gpt_params::n_ctx)
         .def_readwrite("top_k", &gpt_params::top_k)
@@ -154,10 +140,7 @@ PYBIND11_MODULE(llamacpp, m) {
         .def_readwrite("seed", &gpt_params::seed)
         .def_readwrite("n_threads", &gpt_params::n_threads)
         .def_readwrite("repeat_last_n", &gpt_params::repeat_last_n)
-        .def_readwrite("n_batch", &gpt_params::n_batch)
-        .def_readwrite("use_color", &gpt_params::use_color)
-        .def_readwrite("interactive", &gpt_params::interactive)
-        .def_readwrite("interactive_start", &gpt_params::interactive_start);
+        .def_readwrite("n_batch", &gpt_params::n_batch);
 
     py::class_<PyLLAMA>(m, "PyLLAMA")
         .def(py::init<gpt_params>())
@@ -194,6 +177,7 @@ PYBIND11_MODULE(llamacpp, m) {
         .def("is_antiprompt_present", &PyLLAMA::is_antiprompt_present, "Check if antiprompt is present")
         .def("print_startup_stats", &PyLLAMA::print_startup_stats, "Print startup stats")
         .def("print_end_stats", &PyLLAMA::print_end_stats, "Print end stats")
+        .def("reset_remaining_tokens", &PyLLAMA::reset_remaining_tokens, "Reset remaining tokens")
         .def_property("antiprompt", &PyLLAMA::get_antiprompt, &PyLLAMA::set_antiprompt, "Antiprompt")
         ;
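Taken together, the change drops the `prompt`/`antiprompt` strings and the interactive-mode flags from the `gpt_params` binding and exposes a new `PyLLAMA::reset_remaining_tokens()` method. A minimal sketch of how the updated module might be driven from Python, assuming it builds as `llamacpp`; the model path is a placeholder, and the `top_p`, `temp`, and `repeat_penalty` keywords are assumptions for the parameters that fall between `top_k` and `seed` in hunks this diff does not show:

```python
import llamacpp

# After this change, prompt/antiprompt and the interactive flags are no
# longer constructor arguments; only the sampling/runtime knobs remain.
params = llamacpp.gpt_params(
    model="./models/7B/ggml-model-q4_0.bin",  # placeholder path
    n_ctx=512,
    n_predict=128,
    top_k=40,
    top_p=0.95,          # assumed: not visible in this diff
    temp=0.8,            # assumed: not visible in this diff
    repeat_penalty=1.3,  # assumed: not visible in this diff
    seed=-1,
    n_threads=4,
    repeat_last_n=64,
    n_batch=8,
)

llama = llamacpp.PyLLAMA(params)

# The newly bound method resets the remaining-token budget, e.g. between
# turns of a chat loop driven from Python.
llama.reset_remaining_tokens()
```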