|
4 | 4 | import azure.identity
|
5 | 5 | import openai
|
6 | 6 | from dotenv import load_dotenv
|
7 |
| -from messages import MESSAGE_COUNTS # type: ignore[import-not-found] |
| 7 | +from image_messages import IMAGE_MESSAGE_COUNTS |
| 8 | +from messages import MESSAGE_COUNTS |
8 | 9 |
|
9 | 10 | # Setup the OpenAI client to use either Azure OpenAI or OpenAI API
|
10 | 11 | load_dotenv()
|
|
38 | 39 | MODEL_NAME = openai_model
|
39 | 40 |
|
40 | 41 | # Test the token count for each message
|
| 42 | + |
# Verify that the API-reported prompt token count matches the locally
# computed expectation for each text-only message in MESSAGE_COUNTS.
for message_count_pair in MESSAGE_COUNTS:
    # NOTE(review): the previous version wrapped this body in a one-element
    # `for model, expected_tokens in [("gpt-4o", ...count_omni...)]` loop whose
    # bindings were never used (`model` ignored, `expected_tokens` immediately
    # overwritten) — removed as dead code; behavior is unchanged.
    message = message_count_pair["message"]
    expected_tokens = message_count_pair["count"]
    response = client.chat.completions.create(
        model=MODEL_NAME,
        temperature=0.7,
        n=1,
        messages=[message],  # type: ignore[list-item]
    )

    print(message)
    assert response.usage is not None, "Expected usage to be present"
    assert (
        response.usage.prompt_tokens == expected_tokens
    ), f"Expected {expected_tokens} tokens, got {response.usage.prompt_tokens} for model {MODEL_NAME}"
| 60 | + |
| 61 | +for message_count_pair in IMAGE_MESSAGE_COUNTS: |
42 | 62 | for model, expected_tokens in [
|
43 |
| - (MODEL_NAME, message_count_pair["count"]), |
44 |
| - ("gpt-4o", message_count_pair["count_omni"]), |
| 63 | + ("gpt-4o", message_count_pair["count"]), |
| 64 | + ("gpt-4o-mini", message_count_pair["count_4o_mini"]), |
45 | 65 | ]:
|
46 | 66 | response = client.chat.completions.create(
|
47 | 67 | model=model,
|
|
50 | 70 | messages=[message_count_pair["message"]], # type: ignore[list-item]
|
51 | 71 | )
|
52 | 72 |
|
53 |
| - print(message_count_pair["message"]) |
54 | 73 | assert response.usage is not None, "Expected usage to be present"
|
55 | 74 | assert (
|
56 | 75 | response.usage.prompt_tokens == expected_tokens
|
|
0 commit comments