
Commit 2111f87

test(platform): cover trace export edge paths
1 parent efd5e35 commit 2111f87

File tree

2 files changed: +228 -1 lines changed


src/any_llm/providers/platform/platform.py

Lines changed: 1 addition & 1 deletion
@@ -412,7 +412,7 @@ def _combine_chunks(self, chunks: list[ChatCompletionChunk]) -> ChatCompletion:
             model=last_chunk.model,
             created=last_chunk.created,
             object="chat.completion",
-            usage=last_chunk.usage if last_chunk.usage else None,
+            usage=last_chunk.usage or None,
             choices=[],
         )

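Note on the hunk above: the two expressions are behaviourally equivalent for this field, since both collapse a falsy usage value to None, so the rewrite is purely a readability change. A minimal standalone sketch of the equivalence (illustrative names only, not the library's code):

    def normalize_usage(usage):
        # `usage or None` returns None for any falsy value (None, {}, 0),
        # which matches `usage if usage else None`.
        return usage or None

    assert normalize_usage(None) is None
    assert normalize_usage({"prompt_tokens": 3}) == {"prompt_tokens": 3}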
tests/unit/providers/test_platform_provider.py

Lines changed: 227 additions & 0 deletions
@@ -2076,3 +2076,230 @@ def test_scoped_trace_export_forwards_and_sanitizes_attributes() -> None:
         platform_utils._active_trace_exports.update(original_active)
         platform_utils._forwarding_processor_holder.clear()
         platform_utils._forwarding_processor_holder.update(original_forward_holder)
+
+
+def test_require_secure_trace_endpoint_enforces_https_and_localhost(monkeypatch: pytest.MonkeyPatch) -> None:
+    from any_llm.providers.platform import utils as platform_utils
+
+    monkeypatch.setattr(platform_utils, "ANY_LLM_PLATFORM_TRACE_URL", "https://platform.any-llm.ai/v1/traces")
+    assert platform_utils._require_secure_trace_endpoint() == "https://platform.any-llm.ai/v1/traces"
+
+    monkeypatch.setattr(platform_utils, "ANY_LLM_PLATFORM_TRACE_URL", "http://localhost:4318/v1/traces")
+    with patch("any_llm.providers.platform.utils.logger.warning") as warning_mock:
+        assert platform_utils._require_secure_trace_endpoint() == "http://localhost:4318/v1/traces"
+    warning_mock.assert_called_once()
+
+    monkeypatch.setattr(platform_utils, "ANY_LLM_PLATFORM_TRACE_URL", "http://example.com/v1/traces")
+    with pytest.raises(ValueError, match="must use HTTPS"):
+        platform_utils._require_secure_trace_endpoint()
+
+
+def test_sanitize_attribute_mapping_handles_remove_and_redact_paths() -> None:
+    from any_llm.providers.platform import utils as platform_utils
+
+    removable = {
+        "gen_ai.request.model": "gpt-4",
+        "gen_ai.request.messages": "sensitive",
+    }
+    platform_utils._sanitize_attribute_mapping(removable)
+    assert "gen_ai.request.messages" not in removable
+    assert removable["gen_ai.request.model"] == "gpt-4"
+
+    class SetOnlyMapping:
+        def __init__(self) -> None:
+            self._data: dict[str, object] = {
+                "response.body": "sensitive",
+                "latency_ms": 10,
+            }
+
+        def __iter__(self):
+            return iter(self._data)
+
+        def keys(self):
+            return self._data.keys()
+
+        def __getitem__(self, key: str) -> object:
+            return self._data[key]
+
+        def __setitem__(self, key: str, value: object) -> None:
+            self._data[key] = value
+
+    set_only = SetOnlyMapping()
+    platform_utils._sanitize_attribute_mapping(set_only)
+    assert set_only._data["response.body"] == "[redacted]"
+    assert set_only._data["latency_ms"] == 10
+
+
+def test_get_or_create_forward_processor_caches_by_token() -> None:
+    from any_llm.providers.platform import utils as platform_utils
+
+    original = platform_utils._forward_processors.copy()
+    platform_utils._forward_processors.clear()
+    try:
+        processor = Mock()
+        with (
+            patch("any_llm.providers.platform.utils.OTLPSpanExporter", return_value=Mock()),
+            patch("any_llm.providers.platform.utils.BatchSpanProcessor", return_value=processor),
+        ):
+            first = platform_utils._get_or_create_forward_processor("token-a")
+            second = platform_utils._get_or_create_forward_processor("token-a")
+
+        assert first is processor
+        assert second is processor
+    finally:
+        platform_utils._forward_processors.clear()
+        platform_utils._forward_processors.update(original)
+
+
+def test_platform_scoped_processor_handles_empty_or_untracked_context() -> None:
+    from any_llm.providers.platform.utils import PlatformScopedForwardingSpanProcessor
+
+    processor = PlatformScopedForwardingSpanProcessor()
+    span_without_context = Mock()
+    span_without_context.context = None
+    processor.on_end(span_without_context)
+
+    span_with_untracked_trace = Mock()
+    span_with_untracked_trace.context = Mock(trace_id=999)
+    processor.on_end(span_with_untracked_trace)
+
+
+def test_platform_scoped_processor_shutdown_and_force_flush() -> None:
+    from any_llm.providers.platform import utils as platform_utils
+    from any_llm.providers.platform.utils import PlatformScopedForwardingSpanProcessor
+
+    original = platform_utils._forward_processors.copy()
+    platform_utils._forward_processors.clear()
+    try:
+        processor_a = Mock()
+        processor_a.force_flush.return_value = True
+        processor_b = Mock()
+        processor_b.force_flush.return_value = True
+        platform_utils._forward_processors["a"] = processor_a
+        platform_utils._forward_processors["b"] = processor_b
+
+        scoped = PlatformScopedForwardingSpanProcessor()
+        assert scoped.force_flush(timeout_millis=10)
+        scoped.shutdown()
+
+        processor_a.force_flush.assert_called_once_with(timeout_millis=10)
+        processor_b.force_flush.assert_called_once_with(timeout_millis=10)
+        processor_a.shutdown.assert_called_once_with()
+        processor_b.shutdown.assert_called_once_with()
+        assert platform_utils._forward_processors == {}
+    finally:
+        platform_utils._forward_processors.clear()
+        platform_utils._forward_processors.update(original)
+
+
+def test_activate_deactivate_trace_export_reference_counting() -> None:
+    from any_llm.providers.platform import utils as platform_utils
+
+    original = platform_utils._active_trace_exports.copy()
+    platform_utils._active_trace_exports.clear()
+    try:
+        platform_utils.activate_trace_export(7, "token-a")
+        platform_utils.activate_trace_export(7, "token-a")
+        assert platform_utils._active_trace_exports[7] == ("token-a", 2)
+
+        platform_utils.deactivate_trace_export(7)
+        assert platform_utils._active_trace_exports[7] == ("token-a", 1)
+
+        platform_utils.deactivate_trace_export(7)
+        assert 7 not in platform_utils._active_trace_exports
+
+        platform_utils.deactivate_trace_export(999)
+    finally:
+        platform_utils._active_trace_exports.clear()
+        platform_utils._active_trace_exports.update(original)
+
+
+def test_combine_chunks_without_usage_returns_completion_without_usage(any_llm_key: str) -> None:
+    provider_instance = PlatformProvider(api_key=any_llm_key)
+    provider_instance.provider = OpenaiProvider
+
+    chunk = ChatCompletionChunk(
+        id="chatcmpl-123",
+        model="gpt-4",
+        created=1234567890,
+        object="chat.completion.chunk",
+        choices=[ChunkChoice(index=0, delta=ChoiceDelta(), finish_reason="stop")],
+        usage=None,
+    )
+
+    with patch("any_llm.providers.platform.platform.logger.warning") as warning_mock:
+        combined = provider_instance._combine_chunks([chunk])
+
+    warning_mock.assert_called_once()
+    assert combined.usage is None
+    assert combined.model == "gpt-4"
+
+
+@pytest.mark.asyncio
+async def test_stream_with_usage_tracking_ends_span_when_stream_yields_no_chunks(any_llm_key: str) -> None:
+    provider_instance = PlatformProvider(api_key=any_llm_key)
+    llm_span = Mock()
+
+    async def empty_stream() -> AsyncIterator[ChatCompletionChunk]:
+        if False:
+            yield
+
+    result = provider_instance._stream_with_usage_tracking(
+        stream=empty_stream(),
+        start_time_ns=100,
+        request_model="gpt-4",
+        conversation_id=None,
+        session_label="session",
+        user_session_label=None,
+        any_llm_key=any_llm_key,
+        llm_span=llm_span,
+        trace_id=123,
+        access_token=None,
+        trace_export_activated=False,
+    )
+
+    collected = [chunk async for chunk in result]
+    assert collected == []
+    llm_span.end.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_acompletion_sets_error_status_and_deactivates_trace_on_exception(
+    any_llm_key: str,
+    mock_decrypted_provider_key: DecryptedProviderKey,
+) -> None:
+    provider_instance = PlatformProvider(api_key=any_llm_key)
+    provider_instance.provider = OpenaiProvider
+    await _init_provider(provider_instance, mock_decrypted_provider_key)
+
+    mock_provider = Mock()
+    mock_provider.PROVIDER_NAME = "openai"
+    mock_provider._acompletion = AsyncMock(side_effect=RuntimeError("boom"))
+    provider_instance._provider = mock_provider
+
+    params = CompletionParams(
+        model_id="gpt-4",
+        messages=[{"role": "user", "content": "Hello"}],
+        stream=False,
+    )
+
+    mock_span = Mock()
+    mock_span.get_span_context.return_value = Mock(trace_id=456)
+    mock_tracer = Mock()
+    mock_tracer.start_span.return_value = mock_span
+    mock_provider_tp = Mock()
+    mock_provider_tp.get_tracer.return_value = mock_tracer
+
+    with (
+        patch.object(provider_instance.platform_client, "_aensure_valid_token", AsyncMock(return_value="jwt-token")),
+        patch("any_llm.providers.platform.platform._get_or_create_tracer_provider", return_value=mock_provider_tp),
+        patch("any_llm.providers.platform.platform.activate_trace_export"),
+        patch("any_llm.providers.platform.platform.deactivate_trace_export") as deactivate_mock,
+    ):
+        with pytest.raises(RuntimeError, match="boom"):
+            await provider_instance._acompletion(params)
+
+    mock_span.set_attribute.assert_any_call("error.type", "RuntimeError")
+    mock_span.set_status.assert_called_once()
+    mock_span.end.assert_called_once()
+    deactivate_mock.assert_called_once_with(456)

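Note on the reference-counting test above: it pins down the expected activate/deactivate behaviour, where repeated activations for the same trace id increment a count, deactivations decrement it, the entry disappears only when the count reaches zero, and unknown trace ids are ignored. A minimal sketch of that idiom under those assumptions (illustrative only, not the module's actual implementation):

    _active_trace_exports: dict[int, tuple[str, int]] = {}

    def activate_trace_export(trace_id: int, token: str) -> None:
        # Bump the refcount for this trace id, creating the entry on first use.
        _, count = _active_trace_exports.get(trace_id, (token, 0))
        _active_trace_exports[trace_id] = (token, count + 1)

    def deactivate_trace_export(trace_id: int) -> None:
        entry = _active_trace_exports.get(trace_id)
        if entry is None:
            return  # unknown trace ids are a no-op, as the test expects
        token, count = entry
        if count <= 1:
            del _active_trace_exports[trace_id]
        else:
            _active_trace_exports[trace_id] = (token, count - 1)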