
Commit e24e1f1

elastic-opentelemetry-instrumentation-openai: test against baseline
1.26.0 is required to get usage data with streams, but with some small changes we can keep the baseline as old as 1.2.0.
1 parent 1638cbb commit e24e1f1

6 files changed: +24 -10 lines changed
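To see what the commit message refers to, here is a minimal Python sketch (not part of this commit; the configured client and the model name are assumptions for illustration): with openai >= 1.26.0 a streaming request can ask for token usage via stream_options={"include_usage": True}, while clients as old as the 1.2.0 baseline deliver chunks that have no usage attribute at all, so any access to it has to be guarded.

# Sketch only: assumes OPENAI_API_KEY is configured and uses a placeholder model name.
import openai

version = tuple(int(x) for x in openai.version.VERSION.split("."))

client = openai.OpenAI()
kwargs = {
    "model": "gpt-4o-mini",  # placeholder model, not taken from this commit
    "messages": [{"role": "user", "content": "hello"}],
    "stream": True,
}
# stream_options (and usage on streamed chunks) only exists from openai 1.26.0 on.
if version >= (1, 26, 0):
    kwargs["stream_options"] = {"include_usage": True}

usage = None
for chunk in client.chat.completions.create(**kwargs):
    # Pre-1.26.0 chunks have no `usage` attribute, hence the guard,
    # mirrored by the hasattr() check in wrappers.py below.
    if getattr(chunk, "usage", None):
        usage = chunk.usage

print(usage)  # stays None on older clients or without include_usage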

.github/workflows/ci.yml

Lines changed: 13 additions & 4 deletions
@@ -43,11 +43,15 @@ jobs:
       py310: "3.10"
       py311: "3.11"
       py312: "3.12"
+      # Baseline version of openai client we support
+      baseline: "1.2.0"
+      latest: ""
+      working_dir: "instrumentation/elastic-opentelemetry-instrumentation-openai"
     strategy:
       fail-fast: false
       matrix:
-        working-dir: ['instrumentation/elastic-opentelemetry-instrumentation-openai']
         python-version: [py38, py39, py310, py311, py312]
+        openai-version: [baseline, latest]
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python ${{ env[matrix.python-version] }}
@@ -57,9 +61,14 @@ jobs:
           architecture: "x64"
       - if: ${{ env[matrix.python-version] == '3.8' || env[matrix.python-version] == '3.9' }}
         run: pip install -r dev-requirements-3.9.txt
-        working-directory: ${{ matrix.working-dir }}
+        working-directory: ${{ env.working_dir }}
       - if: ${{ env[matrix.python-version] != '3.8' && env[matrix.python-version] != '3.9' }}
         run: pip install -r dev-requirements.txt
-        working-directory: ${{ matrix.working-dir }}
+        working-directory: ${{ env.working_dir }}
+      - if: ${{ env[matrix.openai-version] }}
+        name: update openai to required version if not latest
+        run:
+          pip install openai==${{ env[matrix.openai-version] }}
+        working-directory: ${{ env.working_dir }}
       - run: pytest
-        working-directory: ${{ matrix.working-dir }}
+        working-directory: ${{ env.working_dir }}
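The new steps lean on two GitHub Actions details: env values can be looked up indirectly with env[matrix.openai-version], and an empty string is falsy in an if: expression, so the extra pip install step only runs for the baseline entry (latest is pinned to ""). A rough Python rendering of that per-job logic, for illustration only; the real behaviour is the workflow above:

# Illustration only: mimics the matrix/env indirection of the workflow above.
import subprocess

env = {"baseline": "1.2.0", "latest": ""}  # mirrors the job-level env block
working_dir = "instrumentation/elastic-opentelemetry-instrumentation-openai"

for openai_version in ("baseline", "latest"):  # matrix.openai-version
    pin = env[openai_version]
    if pin:  # `if: ${{ env[matrix.openai-version] }}` is false for the empty string
        subprocess.run(["pip", "install", f"openai=={pin}"], cwd=working_dir, check=True)
    subprocess.run(["pytest"], cwd=working_dir, check=True)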

instrumentation/elastic-opentelemetry-instrumentation-openai/pyproject.toml

Lines changed: 2 additions & 1 deletion
@@ -29,6 +29,7 @@ dependencies = [
   "opentelemetry-api ~= 1.28.1",
   "opentelemetry-instrumentation ~= 0.49b1",
   "opentelemetry-semantic-conventions ~= 0.49b1",
+  "wrapt >= 1.0.0, < 2.0.0",
 ]
 
 [project.readme]
@@ -42,7 +43,7 @@ Homepage = "https://github.com/elastic/elastic-otel-python-instrumentations"
 [project.optional-dependencies]
 dev = ["pytest", "pip-tools", "openai", "numpy", "opentelemetry-test-utils", "vcrpy", "pytest-asyncio", "pytest-vcr"]
 instruments = [
-  "openai >= 1.0.0",
+  "openai >= 1.2.0",
 ]
 
 [project.entry-points.opentelemetry_instrumentor]

instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/package.py

Lines changed: 1 addition & 1 deletion
@@ -14,4 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-_instruments = ("openai >= 1.0.0",)
+_instruments = ("openai >= 1.2.0",)

instrumentation/elastic-opentelemetry-instrumentation-openai/src/opentelemetry/instrumentation/openai/wrappers.py

Lines changed: 3 additions & 1 deletion
@@ -104,7 +104,9 @@ def end(self, exc=None):
     def process_chunk(self, chunk):
         self.response_id = chunk.id
         self.model = chunk.model
-        self.usage = chunk.usage
+        # usage with streaming is available since 1.26.0
+        if hasattr(chunk, "usage"):
+            self.usage = chunk.usage
         # with `include_usage` in `stream_options` we will get a last chunk without choices
         if chunk.choices:
             self.choices += chunk.choices
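With the guard in place, a tracker running against a pre-1.26.0 client simply keeps whatever default it was initialised with instead of raising AttributeError. A stand-in sketch of the difference (StreamState and the SimpleNamespace chunks below are stand-ins, not the real classes):

# Stand-in objects to show the effect of the hasattr() guard; real chunks come
# from the openai client and the real tracker lives in wrappers.py.
from types import SimpleNamespace

class StreamState:
    usage = None

    def process_chunk(self, chunk):
        # usage on streamed chunks only exists from openai 1.26.0 on
        if hasattr(chunk, "usage"):
            self.usage = chunk.usage

old_chunk = SimpleNamespace(id="chatcmpl-1")  # pre-1.26.0 shape: no usage attribute
new_chunk = SimpleNamespace(id="chatcmpl-1",
                            usage=SimpleNamespace(prompt_tokens=3, completion_tokens=5))

state = StreamState()
state.process_chunk(old_chunk)
assert state.usage is None  # old client: nothing recorded, no AttributeError
state.process_chunk(new_chunk)
assert state.usage.completion_tokens == 5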

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_chat_completions.py

Lines changed: 3 additions & 1 deletion
@@ -48,6 +48,8 @@
 )
 from .utils import get_sorted_metrics, logrecords_from_logs
 
+OPENAI_VERSION = tuple([int(x) for x in openai.version.VERSION.split(".")])
+
 providers = ["openai_provider_chat_completions", "ollama_provider_chat_completions", "azure_provider_chat_completions"]
 
 
@@ -1288,7 +1290,7 @@ def test_stream(
     ),
 ]
 
-
+@pytest.mark.skipif(OPENAI_VERSION < (1, 26, 0), reason="stream_options added in 1.26.0")
 @pytest.mark.vcr()
 @pytest.mark.parametrize(
     "provider_str,model,response_model,content,response_id,input_tokens,output_tokens,duration",

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_embeddings.py

Lines changed: 2 additions & 2 deletions
@@ -288,8 +288,8 @@ async def test_async_all_the_client_options(
 
 test_async_connection_error_test_data = [
     ("openai_provider_embeddings", "text-embedding-3-small", 0.2263190783560276),
-    ("azure_provider_embeddings", "text-embedding-3-small", 0.0036478489999751673),
-    ("ollama_provider_embeddings", "all-minilm:33m", 0.0030461717396974564),
+    ("azure_provider_embeddings", "text-embedding-3-small", 1.0062104639999916),
+    ("ollama_provider_embeddings", "all-minilm:33m", 1.0148218229999770),
 ]
 
