@@ -1657,7 +1657,7 @@ struct server_context {
         slot.params        = std::move(task.params);
         slot.prompt_tokens = std::move(task.prompt_tokens);
 
-        SLT_DBG(slot, "launching slot : %s\n", slot.to_json().dump().c_str());
+        SLT_DBG(slot, "launching slot : %s\n", safe_json_to_str(slot.to_json()).c_str());
 
         if (slot.n_predict > 0 && slot.params.n_predict > slot.n_predict) {
             // Might be better to reject the request with a 400 ?
@@ -2942,12 +2942,12 @@ int main(int argc, char ** argv) {
 
     auto res_error = [](httplib::Response & res, const json & error_data) {
         json final_response {{"error", error_data}};
-        res.set_content(final_response.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
+        res.set_content(safe_json_to_str(final_response), MIMETYPE_JSON);
         res.status = json_value(error_data, "code", 500);
     };
 
     auto res_ok = [](httplib::Response & res, const json & data) {
-        res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
+        res.set_content(safe_json_to_str(data), MIMETYPE_JSON);
         res.status = 200;
     };
 
@@ -3524,7 +3524,7 @@ int main(int argc, char ** argv) {
             /* oaicompat_chat */ true);
     };
 
-    const auto handle_models = [&params, &ctx_server](const httplib::Request &, httplib::Response & res) {
+    const auto handle_models = [&params, &ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) {
         json models = {
             {"object", "list"},
             {"data", {
@@ -3538,7 +3538,7 @@ int main(int argc, char ** argv) {
             }}
         };
 
-        res.set_content(models.dump(), MIMETYPE_JSON);
+        res_ok(res, models);
     };
 
     const auto handle_tokenize = [&ctx_server, &res_ok](const httplib::Request & req, httplib::Response & res) {
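For context on the helper this diff switches to: the removed calls serialize with nlohmann::json's replace error handler so invalid UTF-8 in model output is substituted instead of making dump() throw, and safe_json_to_str appears to centralize exactly that call. A minimal sketch of such a helper, assuming the server's usual nlohmann::json alias (the real definition in the server's utils header may differ):

#include <string>
#include <nlohmann/json.hpp>

// Assumed alias, matching how the server code typically refers to nlohmann::json.
using json = nlohmann::ordered_json;

// Sketch only: dump JSON without throwing on invalid UTF-8, mirroring the
// dump(-1, ' ', false, json::error_handler_t::replace) calls the diff removes.
static std::string safe_json_to_str(const json & data) {
    // error_handler_t::replace swaps invalid UTF-8 bytes for U+FFFD
    // instead of raising json::type_error 316 mid-response.
    return data.dump(-1, ' ', false, json::error_handler_t::replace);
}

With every response funneled through res_ok / res_error (including handle_models, which now captures res_ok for that purpose), one helper guards all HTTP serialization paths.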