@@ -3649,20 +3649,30 @@ int main(int argc, char ** argv) {
             oaicompat = true;
             prompt = body.at("input");
         } else if (body.count("content") != 0) {
-            // with "content", we only support single prompt
-            prompt = std::vector<std::string>{body.at("content")};
+            prompt = body.at("content");
         } else {
             res_error(res, format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST));
             return;
         }
 
+        // with "content", we only support single prompt
+        if (!oaicompat && prompt.type() != json::value_t::string) {
+            res_error(res, format_error_response("\"content\" must be a string", ERROR_TYPE_INVALID_REQUEST));
+            return;
+        }
+
         // create and queue the task
         json responses = json::array();
         bool error = false;
         {
             std::vector<server_task> tasks;
             std::vector<llama_tokens> tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, prompt, /* add_special */ false, true);
             for (size_t i = 0; i < tokenized_prompts.size(); i++) {
+                if (tokenized_prompts[i].size() == 0) {
+                    res_error(res, format_error_response("input cannot be an empty string", ERROR_TYPE_INVALID_REQUEST));
+                    return;
+                }
+
                 server_task task = server_task(SERVER_TASK_TYPE_EMBEDDING);
                 task.id = ctx_server.queue_tasks.get_new_id();
                 task.index = i;
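
The hunk changes how the `/embeddings` request body is validated: `"content"` is now passed through as-is instead of being wrapped in a `std::vector<std::string>`, a non-string `"content"` is rejected on the non-OpenAI-compatible path, and any prompt that tokenizes to zero tokens is rejected before a task is queued. Below is a minimal, self-contained sketch of just the body-validation part, assuming nlohmann::json; the helper name `validate_embedding_body` and the return-an-error-string style are hypothetical, while the real server reports errors through `res_error()` / `format_error_response()` and performs the empty-prompt check only after tokenization.

```cpp
#include <nlohmann/json.hpp>
#include <iostream>
#include <optional>
#include <string>

using json = nlohmann::json;

// Returns an error message for an invalid request body, std::nullopt otherwise.
// Mirrors the validation in the diff; not the actual server helper.
static std::optional<std::string> validate_embedding_body(const json & body, bool & oaicompat, json & prompt) {
    if (body.count("input") != 0) {
        // OpenAI-compatible path: "input" may be a string or an array of strings
        oaicompat = true;
        prompt = body.at("input");
    } else if (body.count("content") != 0) {
        prompt = body.at("content");
    } else {
        return "\"input\" or \"content\" must be provided";
    }

    // with "content", only a single prompt (a plain string) is supported
    if (!oaicompat && prompt.type() != json::value_t::string) {
        return "\"content\" must be a string";
    }
    return std::nullopt;
}

int main() {
    bool oaicompat = false;
    json prompt;

    // accepted: OpenAI-style "input" may carry multiple prompts
    std::cout << validate_embedding_body(json{{"input", {"hello", "world"}}}, oaicompat, prompt).value_or("ok") << "\n";

    // rejected: "content" must be a single string, not an array
    oaicompat = false;
    std::cout << validate_embedding_body(json{{"content", {"a", "b"}}}, oaicompat, prompt).value_or("ok") << "\n";
}
```

The empty-prompt check is deliberately done per tokenized prompt inside the loop, since `"input"` may be an array and only some of its entries could tokenize to nothing.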