@@ -109,7 +109,9 @@ class Config:
109109 "/v1/completions" ,
110110 response_model = CreateCompletionResponse ,
111111)
112- def create_completion (request : CreateCompletionRequest , llama : llama_cpp .Llama = Depends (get_llama )):
112+ def create_completion (
113+ request : CreateCompletionRequest , llama : llama_cpp .Llama = Depends (get_llama )
114+ ):
113115 if isinstance (request .prompt , list ):
114116 request .prompt = "" .join (request .prompt )
115117
@@ -153,7 +155,9 @@ class Config:
153155 "/v1/embeddings" ,
154156 response_model = CreateEmbeddingResponse ,
155157)
156- def create_embedding (request : CreateEmbeddingRequest , llama : llama_cpp .Llama = Depends (get_llama )):
158+ def create_embedding (
159+ request : CreateEmbeddingRequest , llama : llama_cpp .Llama = Depends (get_llama )
160+ ):
157161 return llama .create_embedding (** request .dict (exclude = {"model" , "user" }))
158162
159163
@@ -207,7 +211,7 @@ class Config:
 )
 def create_chat_completion(
     request: CreateChatCompletionRequest,
-    llama: llama_cpp.Llama = Depends(get_llama),
+    llama: llama_cpp.Llama = Depends(get_llama),
 ) -> Union[llama_cpp.ChatCompletion, EventSourceResponse]:
     completion_or_chunks = llama.create_chat_completion(
         **request.dict(
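Since this diff only rewraps the handler signatures without changing behavior, a quick way to sanity-check the three touched endpoints is to call them over HTTP. A minimal smoke-test sketch, assuming a server is already running with a model loaded; the `BASE` address is a placeholder (the default local port may differ in your setup):

```python
# Exercise the three endpoints touched by this reformat.
import requests

BASE = "http://localhost:8000"  # hypothetical local server address

# POST /v1/completions: prompt in, completion text out.
resp = requests.post(
    f"{BASE}/v1/completions",
    json={"prompt": "The capital of France is", "max_tokens": 16},
)
print(resp.json()["choices"][0]["text"])

# POST /v1/embeddings: input text in, embedding vector out.
resp = requests.post(
    f"{BASE}/v1/embeddings",
    json={"input": "Hello, world!"},
)
print(len(resp.json()["data"][0]["embedding"]))

# POST /v1/chat/completions: messages in, assistant message out.
resp = requests.post(
    f"{BASE}/v1/chat/completions",
    json={"messages": [{"role": "user", "content": "Say hi"}]},
)
print(resp.json()["choices"][0]["message"]["content"])
```

The response shapes follow the OpenAI-compatible schema the server mirrors (`choices[0].text`, `data[0].embedding`, `choices[0].message.content`); identical requests should produce identical responses before and after this commit.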