@@ -1,8 +1,10 @@
 import os
 
 import pytest
+from langchain_core.outputs import GenerationChunk
 
 from langchain_openai import OpenAI
+from langchain_openai.llms.base import _stream_response_to_generation_chunk
 
 os.environ["OPENAI_API_KEY"] = "foo"
 
@@ -69,3 +71,38 @@ def token_encoder(text: str) -> list[int]:
 
     llm = OpenAI(custom_get_token_ids=token_encoder)
     assert llm.get_token_ids("foo") == [1, 2, 3]
+
+
+def test_stream_response_to_generation_chunk() -> None:
+    completion = {
+        "id": "cmpl-abc123",
+        "choices": [
+            {"finish_reason": None, "index": 0, "logprobs": None, "text": "foo"}
+        ],
+        "created": 1749214401,
+        "model": "my-model",
+        "object": "text_completion",
+        "system_fingerprint": None,
+        "usage": None,
+    }
+    chunk = _stream_response_to_generation_chunk(completion)
+    assert chunk == GenerationChunk(
+        text="foo", generation_info={"finish_reason": None, "logprobs": None}
+    )
+
+    # Pathological completion with None text (e.g., from other providers)
+    completion = {
+        "id": "cmpl-abc123",
+        "choices": [
+            {"finish_reason": None, "index": 0, "logprobs": None, "text": None}
+        ],
+        "created": 1749214401,
+        "model": "my-model",
+        "object": "text_completion",
+        "system_fingerprint": None,
+        "usage": None,
+    }
+    chunk = _stream_response_to_generation_chunk(completion)
+    assert chunk == GenerationChunk(
+        text="", generation_info={"finish_reason": None, "logprobs": None}
+    )
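For context, the helper under test converts one raw streaming completion dict into a `GenerationChunk`, and the second case above pins down the key behavior: a `None` `text` field is coerced to the empty string. Below is a minimal sketch of that conversion, inferred from the assertions in this test rather than copied from `langchain_openai.llms.base`; the real implementation may differ in detail.

```python
# Hypothetical sketch inferred from the test assertions above; the actual
# helper lives in langchain_openai.llms.base and may differ.
from langchain_core.outputs import GenerationChunk


def _stream_response_to_generation_chunk(stream_response: dict) -> GenerationChunk:
    if not stream_response["choices"]:
        # Defensive default when a chunk carries no choices.
        return GenerationChunk(text="")
    choice = stream_response["choices"][0]
    return GenerationChunk(
        # Some providers emit "text": None; `or ""` coerces it so that
        # concatenating chunk texts downstream never raises a TypeError.
        text=choice["text"] or "",
        generation_info={
            "finish_reason": choice.get("finish_reason"),
            "logprobs": choice.get("logprobs"),
        },
    )
```

Under this sketch, both completions in the test produce chunks whose texts concatenate cleanly ("foo" followed by "").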