
Commit 9b2e1b1

Log model and usage stats in record.sampling (#1449)
It's often useful to know the token expenditure of running an eval, especially as the number of evals in this repo grows. Example [feature request](#1350); we also rely on this e.g. [here](https://github.com/openai/evals/tree/main/evals/elsuite/bluff#token-estimates). Computing this manually is cumbersome, so this PR simply logs the [usage](https://platform.openai.com/docs/api-reference/chat/object#chat/object-usage) receipt (token usage) of each API call in `record.sampling`. This makes it easy to sum up the token cost of an eval given a logfile of the run.

Here is an example of a resulting `sampling` log line after this change (the `data.model` and `data.usage` fields are new):

```json
{
  "run_id": "240103035835K2NWEEJC",
  "event_id": 1,
  "sample_id": "superficial-patterns.dev.8",
  "type": "sampling",
  "data": {
    "prompt": [
      {
        "role": "system",
        "content": "If the red key goes to the pink door, and the blue key goes to the green door, but you paint the green door to be the color pink, and the pink door to be the color red, and the red key yellow, based on the new colors of everything, which keys go to what doors?"
      }
    ],
    "sampled": [
      "Based on the new colors, the yellow key goes to the pink door (previously red), and the blue key goes to the red door (previously pink)."
    ],
    "model": "gpt-3.5-turbo-0613",
    "usage": {
      "completion_tokens": 33,
      "prompt_tokens": 70,
      "total_tokens": 103
    }
  },
  "created_by": "",
  "created_at": "2024-01-03 03:58:37.466772+00:00"
}
```
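Given such a logfile, summing the token cost of a run could then look like this (a minimal sketch, not part of this PR; it assumes the run log is the JSONL events file written by the recorder, with one JSON object per line, and uses a hypothetical path):

```python
import json
from collections import Counter

def total_token_usage(logfile_path: str) -> Counter:
    """Sum the usage receipts of all sampling events in an evals run log (JSONL)."""
    totals: Counter = Counter()
    with open(logfile_path) as f:
        for line in f:
            if not line.strip():
                continue
            event = json.loads(line)
            # Non-sampling lines (spec, final_report, other events) are skipped.
            if event.get("type") == "sampling":
                usage = event.get("data", {}).get("usage") or {}
                totals.update({k: v for k, v in usage.items() if isinstance(v, int)})
    return totals

# Hypothetical path; for the single event above this would print
# Counter({'total_tokens': 103, 'prompt_tokens': 70, 'completion_tokens': 33})
print(total_token_usage("run_log.jsonl"))
```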
1 parent bfe3925 commit 9b2e1b1

File tree

2 files changed: +41 −2 lines changed

evals/cli/oaieval.py

Lines changed: 29 additions & 0 deletions
```diff
@@ -220,6 +220,7 @@ def to_number(x: str) -> Union[int, float, str]:
         **extra_eval_params,
     )
     result = eval.run(recorder)
+    add_token_usage_to_result(result, recorder)
     recorder.record_final_report(result)
 
     if not (args.dry_run or args.local_run):
@@ -258,6 +259,34 @@ def build_recorder(
     )
 
 
+def add_token_usage_to_result(result: dict[str, Any], recorder: RecorderBase) -> None:
+    """
+    Add token usage from logged sampling events to the result dictionary from the recorder.
+    """
+    usage_events = []
+    sampling_events = recorder.get_events("sampling")
+    for event in sampling_events:
+        if "usage" in event.data:
+            usage_events.append(dict(event.data["usage"]))
+    logger.info(f"Found {len(usage_events)}/{len(sampling_events)} sampling events with usage data")
+    if usage_events:
+        # Sum up the usage of all samples (assumes the usage is the same for all samples)
+        total_usage = {
+            key: sum(u[key] if u[key] is not None else 0 for u in usage_events)
+            for key in usage_events[0]
+        }
+        total_usage_str = "\n".join(f"{key}: {value:,}" for key, value in total_usage.items())
+        logger.info(f"Token usage from {len(usage_events)} sampling events:\n{total_usage_str}")
+        for key, value in total_usage.items():
+            keyname = f"usage_{key}"
+            if keyname not in result:
+                result[keyname] = value
+            else:
+                logger.warning(
+                    f"Usage key {keyname} already exists in result, not adding {keyname}"
+                )
+
+
 def main() -> None:
     parser = get_parser()
     args = cast(OaiEvalArguments, parser.parse_args(sys.argv[1:]))
```
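To make the aggregation concrete, here is the same summation in isolation, with made-up usage receipts (a minimal sketch, not part of the diff):

```python
# Hypothetical per-call usage receipts, as collected from sampling events.
usage_events = [
    {"completion_tokens": 33, "prompt_tokens": 70, "total_tokens": 103},
    {"completion_tokens": 12, "prompt_tokens": 70, "total_tokens": 82},
]

# Same summation as add_token_usage_to_result: sum each key across all events.
total_usage = {
    key: sum(u[key] if u[key] is not None else 0 for u in usage_events)
    for key in usage_events[0]
}
assert total_usage == {"completion_tokens": 45, "prompt_tokens": 140, "total_tokens": 185}

# The totals are then added to the final report under usage_<key>,
# e.g. result["usage_total_tokens"] = 185.
```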

evals/completion_fns/openai.py

Lines changed: 12 additions & 2 deletions
```diff
@@ -88,7 +88,12 @@ def __call__(
             **{**kwargs, **self.extra_options},
         )
         result = OpenAICompletionResult(raw_data=result, prompt=openai_create_prompt)
-        record_sampling(prompt=result.prompt, sampled=result.get_completions())
+        record_sampling(
+            prompt=result.prompt,
+            sampled=result.get_completions(),
+            model=result.raw_data.model,
+            usage=result.raw_data.usage,
+        )
         return result
 
 
@@ -133,5 +138,10 @@ def __call__(
             **{**kwargs, **self.extra_options},
         )
         result = OpenAIChatCompletionResult(raw_data=result, prompt=openai_create_prompt)
-        record_sampling(prompt=result.prompt, sampled=result.get_completions())
+        record_sampling(
+            prompt=result.prompt,
+            sampled=result.get_completions(),
+            model=result.raw_data.model,
+            usage=result.raw_data.usage,
+        )
         return result
```
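For context, the extra keyword arguments passed to `record_sampling` are what end up under `data` in the logged sampling event, producing the `model` and `usage` fields shown in the commit message above. A minimal sketch of that mapping with a simplified stand-in recorder (an illustration, not the actual evals implementation):

```python
from datetime import datetime, timezone

def record_sampling(prompt, sampled, **extra):
    """Simplified stand-in: extra keyword arguments are stored verbatim under the event's data."""
    return {
        "type": "sampling",
        "data": {"prompt": prompt, "sampled": sampled, **extra},
        "created_at": datetime.now(timezone.utc).isoformat(),
    }

event = record_sampling(
    prompt=[{"role": "system", "content": "..."}],
    sampled=["..."],
    model="gpt-3.5-turbo-0613",
    usage={"completion_tokens": 33, "prompt_tokens": 70, "total_tokens": 103},
)
assert event["data"]["model"] == "gpt-3.5-turbo-0613"
assert event["data"]["usage"]["total_tokens"] == 103
```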
