Commit d2f88a1

chore: telemetry test (#3405)
# What does this PR do?

- removed fixed-duration sleeps

## Test Plan
1 parent d4e45cd commit d2f88a1
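
The underlying change: rather than sleeping for a fixed number of seconds and hoping telemetry has landed, the tests now poll at a short interval until the expected data shows up or an overall deadline passes. A minimal sketch of that pattern, assuming an invented `wait_until` helper and a generic `predicate` callable (neither is code from this repository):

```python
import time


def wait_until(predicate, timeout_s=30.0, interval_s=0.1):
    """Poll `predicate` until it returns True or `timeout_s` elapses."""
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if predicate():
            return True  # condition met; stop waiting early
        time.sleep(interval_s)  # short poll instead of one long fixed sleep
    return False  # deadline passed without the condition becoming true


# Hypothetical usage mirroring the fixtures below, which wait until enough
# traces exist before yielding to the test:
# wait_until(lambda: len(client.telemetry.query_traces(limit=10)) >= 5)
```

Polling keeps the happy path fast (the wait ends as soon as the data appears) while the deadline still bounds the worst case.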

3 files changed: 10 additions & 17 deletions


tests/integration/telemetry/test_openai_telemetry.py

Lines changed: 8 additions & 9 deletions
@@ -49,16 +49,13 @@ def setup_openai_telemetry_data(llama_stack_client, text_model_id):
         traces = llama_stack_client.telemetry.query_traces(limit=10)
         if len(traces) >= 5:  # 5 OpenAI completion traces
             break
-        time.sleep(1)
+        time.sleep(0.1)
 
     if len(traces) < 5:
         pytest.fail(
             f"Failed to create sufficient OpenAI completion telemetry data after 30s. Got {len(traces)} traces."
         )
 
-    # Wait for 5 seconds to ensure traces has completed logging
-    time.sleep(5)
-
     yield
 
@@ -185,11 +182,13 @@ def test_openai_completion_creates_telemetry(llama_stack_client, text_model_id):
     assert len(response.choices) > 0, "Response should have at least one choice"
 
     # Wait for telemetry to be recorded
-    time.sleep(3)
-
-    # Check that we have more traces now
-    final_traces = llama_stack_client.telemetry.query_traces(limit=20)
-    final_count = len(final_traces)
+    start_time = time.time()
+    while time.time() - start_time < 30:
+        final_traces = llama_stack_client.telemetry.query_traces(limit=20)
+        final_count = len(final_traces)
+        if final_count > initial_count:
+            break
+        time.sleep(0.1)
 
     # Should have at least as many traces as before (might have more due to other activity)
     assert final_count >= initial_count, "Should have at least as many traces after OpenAI call"

tests/integration/telemetry/test_telemetry.py

Lines changed: 1 addition & 4 deletions
@@ -42,14 +42,11 @@ def setup_telemetry_data(llama_stack_client, text_model_id):
         traces = llama_stack_client.telemetry.query_traces(limit=10)
         if len(traces) >= 4:
             break
-        time.sleep(1)
+        time.sleep(0.1)
 
     if len(traces) < 4:
         pytest.fail(f"Failed to create sufficient telemetry data after 30s. Got {len(traces)} traces.")
 
-    # Wait for 5 seconds to ensure traces has completed logging
-    time.sleep(5)
-
     yield

tests/integration/telemetry/test_telemetry_metrics.py

Lines changed: 1 addition & 4 deletions
@@ -46,10 +46,7 @@ def setup_telemetry_metrics_data(openai_client, client_with_models, text_model_i
             break
         except Exception:
             pass
-        time.sleep(1)
-
-    # Wait additional time to ensure all metrics are processed
-    time.sleep(5)
+        time.sleep(0.1)
 
     # Return the token lists for use in tests
     return {"prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}
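
One nuance about the two loop shapes in these diffs: the test above bounds its wait by wall-clock time (`while time.time() - start_time < 30`), so a smaller sleep only makes polling more responsive. The fixtures' outer loops are not visible in these hunks; if any of them is iteration-bounded rather than time-bounded, reducing the sleep from 1 s to 0.1 s also reduces the worst-case wait. A sketch of the difference (illustrative only; `condition` is a hypothetical stand-in, not code from the repository):

```python
import time


def condition() -> bool:
    """Stand-in for 'the awaited telemetry has arrived'; always True here."""
    return True


# Iteration-bounded: worst-case wait is iterations x interval,
# so 30 x 1 s (about 30 s) becomes 30 x 0.1 s (about 3 s) when the interval shrinks.
for _ in range(30):
    if condition():
        break
    time.sleep(0.1)

# Wall-clock-bounded: the 30 s budget is independent of the interval;
# a smaller interval just means the condition is checked more often.
deadline = time.time() + 30
while time.time() < deadline:
    if condition():
        break
    time.sleep(0.1)
```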

0 commit comments
