Skip to content

Commit 9e87358

Browse files
committed
server : fill usage info in embeddings response
1 parent 4f51968 commit 9e87358

File tree

3 files changed

+42
-5
lines changed

3 files changed

+42
-5
lines changed

examples/server/server.cpp

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -719,14 +719,17 @@ struct server_task_result_embd : server_task_result {
719719
int index = 0;
720720
std::vector<float> embedding;
721721

722+
int32_t n_prompt_tokens;
723+
722724
virtual int get_index() override {
723725
return index;
724726
}
725727

726728
virtual json to_json() override {
727729
return json {
728-
{"index", index},
729-
{"embedding", embedding},
730+
{"index", index},
731+
{"embedding", embedding},
732+
{"tokens_evaluated", n_prompt_tokens},
730733
};
731734
}
732735
};
@@ -1995,6 +1998,7 @@ struct server_context {
19951998
auto res = std::make_unique<server_task_result_embd>();
19961999
res->id = slot.id_task;
19972000
res->index = slot.index;
2001+
res->n_prompt_tokens = slot.n_prompt_tokens;
19982002

19992003
const int n_embd = llama_n_embd(model);
20002004

examples/server/tests/unit/test_embedding.py

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,3 +97,33 @@ def test_same_prompt_give_same_result():
9797
vi = res.body['data'][i]['embedding']
9898
for x, y in zip(v0, vi):
9999
assert abs(x - y) < EPSILON
100+
101+
102+
@pytest.mark.parametrize(
103+
"content,n_tokens",
104+
[
105+
("I believe the meaning of life is", 7),
106+
("This is a test", 4),
107+
]
108+
)
109+
def test_embedding_usage_single(content, n_tokens):
110+
global server
111+
server.start()
112+
res = server.make_request("POST", "/embeddings", data={"input": content})
113+
assert res.status_code == 200
114+
assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens']
115+
assert res.body['usage']['prompt_tokens'] == n_tokens
116+
117+
118+
def test_embedding_usage_multiple():
119+
global server
120+
server.start()
121+
res = server.make_request("POST", "/embeddings", data={
122+
"input": [
123+
"I believe the meaning of life is",
124+
"I believe the meaning of life is",
125+
],
126+
})
127+
assert res.status_code == 200
128+
assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens']
129+
assert res.body['usage']['prompt_tokens'] == 2 * 7

examples/server/utils.hpp

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -560,21 +560,24 @@ static json oaicompat_completion_params_parse(
560560

561561
static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) {
562562
json data = json::array();
563+
int32_t n_prompt_tokens = 0;
563564
int i = 0;
564565
for (const auto & elem : embeddings) {
565566
data.push_back(json{
566567
{"embedding", json_value(elem, "embedding", json::array())},
567568
{"index", i++},
568569
{"object", "embedding"}
569570
});
571+
572+
n_prompt_tokens += json_value(elem, "tokens_evaluated", 0);
570573
}
571574

572575
json res = json {
573576
{"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
574577
{"object", "list"},
575-
{"usage", json { // TODO: fill
576-
{"prompt_tokens", 0},
577-
{"total_tokens", 0}
578+
{"usage", json {
579+
{"prompt_tokens", n_prompt_tokens},
580+
{"total_tokens", n_prompt_tokens}
578581
}},
579582
{"data", data}
580583
};

0 commit comments

Comments (0)