
Commit 8903433

fixed ruff error
1 parent ae0ab2f · commit 8903433

File tree

7 files changed: +136 −64 lines
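
The commit message indicates these are mechanical lint/format fixes. A typical way to produce them (an assumed invocation; the exact command is not recorded in the commit, and the ruff configuration lives in the repo's pyproject.toml) would be:

    # Assumed invocation -- not part of the commit itself
    ruff check --fix instrumentation-genai/opentelemetry-instrumentation-langchain/
    ruff format instrumentation-genai/opentelemetry-instrumentation-langchain/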

instrumentation-genai/opentelemetry-instrumentation-langchain/examples/manual/main.py

Lines changed: 2 additions & 1 deletion

@@ -14,8 +14,8 @@
 span_processor = BatchSpanProcessor(OTLPSpanExporter())
 trace.get_tracer_provider().add_span_processor(span_processor)
 
-def main():
 
+def main():
     # Set up instrumentation
     LangChainInstrumentor().instrument()

@@ -43,5 +43,6 @@ def main():
     # Un-instrument after use
     LangChainInstrumentor().uninstrument()
 
+
 if __name__ == "__main__":
     main()

instrumentation-genai/opentelemetry-instrumentation-langchain/examples/zero-code/main.py

Lines changed: 1 addition & 1 deletion

@@ -22,6 +22,6 @@ def main():
     result = llm.invoke(messages).content
     print("LLM output:\n", result)
 
+
 if __name__ == "__main__":
     main()
-

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/__init__.py

Lines changed: 13 additions & 3 deletions

@@ -59,7 +59,9 @@ class LangChainInstrumentor(BaseInstrumentor):
     to capture LLM telemetry.
     """
 
-    def __init__(self, exception_logger: Optional[Callable[[Exception], Any]] = None):
+    def __init__(
+        self, exception_logger: Optional[Callable[[Exception], Any]] = None
+    ):
         super().__init__()
         Config.exception_logger = exception_logger
 

@@ -100,10 +102,18 @@ class _BaseCallbackManagerInitWrapper:
     Wrap the BaseCallbackManager __init__ to insert custom callback handler in the manager's handlers list.
     """
 
-    def __init__(self, callback_handler: OpenTelemetryLangChainCallbackHandler):
+    def __init__(
+        self, callback_handler: OpenTelemetryLangChainCallbackHandler
+    ):
         self._otel_handler = callback_handler
 
-    def __call__(self, wrapped: Callable[..., None], instance: Any, args: tuple[Any, ...], kwargs: dict[str, Any]):
+    def __call__(
+        self,
+        wrapped: Callable[..., None],
+        instance: Any,
+        args: tuple[Any, ...],
+        kwargs: dict[str, Any],
+    ):
         wrapped(*args, **kwargs)
         # Ensure our OTel callback is present if not already.
         for handler in instance.inheritable_handlers:
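
The hunk cuts off inside the dedupe loop. As a rough sketch (the loop body below is an assumption, not part of this commit), the wrapper presumably finishes along these lines:

    # Hypothetical continuation -- only the signature and the lines above are
    # from the diff; add_handler() is the standard BaseCallbackManager API.
    for handler in instance.inheritable_handlers:
        if isinstance(handler, OpenTelemetryLangChainCallbackHandler):
            break  # our handler is already registered
    else:
        instance.add_handler(self._otel_handler, inherit=True)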

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py

Lines changed: 69 additions & 42 deletions

@@ -27,7 +27,7 @@ class _SpanState:
     children: List[UUID] = field(default_factory=list)
 
 
-class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): # type: ignore[misc]
+class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler):  # type: ignore[misc]
     """
     A callback handler for LangChain that uses OpenTelemetry to create spans for LLM calls and chains, tools etc,. in future.
     """

@@ -36,7 +36,7 @@ def __init__(
         self,
         tracer: Tracer,
     ) -> None:
-        super().__init__() # type: ignore
+        super().__init__()  # type: ignore
         self._tracer = tracer
 
         # Map from run_id -> _SpanState, to keep track of spans and parent/child relationships

@@ -53,7 +53,9 @@ def _create_span(
         if parent_run_id is not None and parent_run_id in self.spans:
             parent_span = self.spans[parent_run_id].span
             ctx = set_span_in_context(parent_span)
-            span = self._tracer.start_span(name=span_name, kind=kind, context=ctx)
+            span = self._tracer.start_span(
+                name=span_name, kind=kind, context=ctx
+            )
         else:
             # top-level or missing parent
             span = self._tracer.start_span(name=span_name, kind=kind)

@@ -72,14 +74,16 @@ def _create_llm_span(
         parent_run_id: Optional[UUID],
         name: str,
     ) -> Span:
-
         span = self._create_span(
             run_id=run_id,
             parent_run_id=parent_run_id,
             span_name=f"{name}.{GenAI.GenAiOperationNameValues.CHAT.value}",
             kind=SpanKind.CLIENT,
         )
-        span.set_attribute(GenAI.GEN_AI_OPERATION_NAME, GenAI.GenAiOperationNameValues.CHAT.value)
+        span.set_attribute(
+            GenAI.GEN_AI_OPERATION_NAME,
+            GenAI.GenAiOperationNameValues.CHAT.value,
+        )
         span.set_attribute(GenAI.GEN_AI_SYSTEM, name)
 
         return span

@@ -99,15 +103,15 @@ def _get_span(self, run_id: UUID) -> Span:
 
     @dont_throw
     def on_chat_model_start(
-            self,
-            serialized: Dict[str, Any],
-            messages: List[List[BaseMessage]], # type: ignore
-            *,
-            run_id: UUID,
-            tags: Optional[List[str]] = None,
-            parent_run_id: Optional[UUID] = None,
-            metadata: Optional[Dict[str, Any]] = None,
-            **kwargs: Any,
+        self,
+        serialized: Dict[str, Any],
+        messages: List[List[BaseMessage]],  # type: ignore
+        *,
+        run_id: UUID,
+        tags: Optional[List[str]] = None,
+        parent_run_id: Optional[UUID] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        **kwargs: Any,
     ) -> None:
         name = serialized.get("name") or kwargs.get("name") or "ChatLLM"
         span = self._create_llm_span(

@@ -126,18 +130,23 @@ def on_chat_model_start(
             span.set_attribute(GenAI.GEN_AI_REQUEST_TOP_P, top_p)
         frequency_penalty = invocation_params.get("frequency_penalty")
         if frequency_penalty is not None:
-            span.set_attribute(GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY, frequency_penalty)
+            span.set_attribute(
+                GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY, frequency_penalty
+            )
         presence_penalty = invocation_params.get("presence_penalty")
         if presence_penalty is not None:
-            span.set_attribute(GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY, presence_penalty)
+            span.set_attribute(
+                GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY, presence_penalty
+            )
         stop_sequences = invocation_params.get("stop")
         if stop_sequences is not None:
-            span.set_attribute(GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, stop_sequences)
+            span.set_attribute(
+                GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, stop_sequences
+            )
         seed = invocation_params.get("seed")
         if seed is not None:
             span.set_attribute(GenAI.GEN_AI_REQUEST_SEED, seed)
 
-
         if metadata is not None:
             max_tokens = metadata.get("ls_max_tokens")
             if max_tokens is not None:

@@ -148,35 +157,45 @@ def on_chat_model_start(
                 span.set_attribute("gen_ai.provider.name", provider)
             temperature = metadata.get("ls_temperature")
             if temperature is not None:
-                span.set_attribute(GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature)
+                span.set_attribute(
+                    GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature
+                )
 
     @dont_throw
     def on_llm_end(
-            self,
-            response: LLMResult, # type: ignore
-            *,
-            run_id: UUID,
-            parent_run_id: Optional[UUID] = None,
-            **kwargs: Any,
+        self,
+        response: LLMResult,  # type: ignore
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
     ) -> None:
         span = self._get_span(run_id)
 
         finish_reasons: List[str] = []
-        for generation in getattr(response, "generations", []): # type: ignore
+        for generation in getattr(response, "generations", []):  # type: ignore
             for chat_generation in generation:
-                generation_info = getattr(chat_generation, "generation_info", None)
+                generation_info = getattr(
+                    chat_generation, "generation_info", None
+                )
                 if generation_info is not None:
                     finish_reason = generation_info.get("finish_reason")
                     if finish_reason is not None:
                         finish_reasons.append(str(finish_reason) or "error")
 
-        span.set_attribute(GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons)
+        span.set_attribute(
+            GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons
+        )
 
-        llm_output = getattr(response, "llm_output", None) # type: ignore
+        llm_output = getattr(response, "llm_output", None)  # type: ignore
         if llm_output is not None:
-            response_model = llm_output.get("model_name") or llm_output.get("model")
+            response_model = llm_output.get("model_name") or llm_output.get(
+                "model"
+            )
             if response_model is not None:
-                span.set_attribute(GenAI.GEN_AI_RESPONSE_MODEL, str(response_model))
+                span.set_attribute(
+                    GenAI.GEN_AI_RESPONSE_MODEL, str(response_model)
+                )
 
             response_id = llm_output.get("id")
             if response_id is not None:

@@ -187,27 +206,35 @@ def on_llm_end(
             if usage:
                 prompt_tokens = usage.get("prompt_tokens", 0)
                 completion_tokens = usage.get("completion_tokens", 0)
-                span.set_attribute(GenAI.GEN_AI_USAGE_INPUT_TOKENS,
-                                   int(prompt_tokens) if prompt_tokens is not None else 0)
-                span.set_attribute(GenAI.GEN_AI_USAGE_OUTPUT_TOKENS,
-                                   int(completion_tokens) if completion_tokens is not None else 0)
+                span.set_attribute(
+                    GenAI.GEN_AI_USAGE_INPUT_TOKENS,
+                    int(prompt_tokens) if prompt_tokens is not None else 0,
+                )
+                span.set_attribute(
+                    GenAI.GEN_AI_USAGE_OUTPUT_TOKENS,
+                    int(completion_tokens)
+                    if completion_tokens is not None
+                    else 0,
+                )
 
         # End the LLM span
         self._end_span(run_id)
 
     @dont_throw
     def on_llm_error(
-            self,
-            error: BaseException,
-            *,
-            run_id: UUID,
-            parent_run_id: Optional[UUID] = None,
-            **kwargs: Any,
+        self,
+        error: BaseException,
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
     ) -> None:
         self._handle_error(error, run_id)
 
     def _handle_error(self, error: BaseException, run_id: UUID):
         span = self._get_span(run_id)
         span.set_status(Status(StatusCode.ERROR, str(error)))
-        span.set_attribute(ErrorAttributes.ERROR_TYPE, type(error).__qualname__)
+        span.set_attribute(
+            ErrorAttributes.ERROR_TYPE, type(error).__qualname__
+        )
         self._end_span(run_id)
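
For orientation, a minimal sketch of using this handler outside the instrumentor (only the constructor signature comes from the diff; the tracer wiring and the invoke call are assumptions):

    from opentelemetry import trace

    tracer = trace.get_tracer(__name__)
    handler = OpenTelemetryLangChainCallbackHandler(tracer=tracer)

    # Pass it per-call, e.g.:
    # llm.invoke(messages, config={"callbacks": [handler]})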

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/utils.py

Lines changed: 4 additions & 0 deletions

@@ -20,11 +20,13 @@
 
 F = TypeVar("F", bound=Callable[..., Any])
 
+
 def dont_throw(func: F) -> F:
     """
     Decorator that catches and logs exceptions, rather than re-raising them,
     to avoid interfering with user code if instrumentation fails.
     """
+
     def wrapper(*args: Any, **kwargs: Any) -> Optional[Any]:
         try:
             return func(*args, **kwargs)

@@ -35,7 +37,9 @@ def wrapper(*args: Any, **kwargs: Any) -> Optional[Any]:
                 traceback.format_exc(),
             )
             from opentelemetry.instrumentation.langchain.config import Config
+
             if Config.exception_logger:
                 Config.exception_logger(e)
             return None
+
     return wrapper  # type: ignore
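
As an illustration (hypothetical function, not from the commit), an exception inside a decorated callback is logged rather than raised:

    @dont_throw
    def record_event(span, payload):
        # A KeyError here is caught, logged, and forwarded to
        # Config.exception_logger if one was registered.
        span.set_attribute("gen_ai.event", payload["name"])

    record_event(span, {})  # returns None instead of raising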

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py

Lines changed: 6 additions & 1 deletion

@@ -20,12 +20,14 @@ def fixture_span_exporter():
     exporter = InMemorySpanExporter()
     yield exporter
 
+
 @pytest.fixture(scope="function", name="tracer_provider")
 def fixture_tracer_provider(span_exporter):
     provider = TracerProvider()
     provider.add_span_processor(SimpleSpanProcessor(span_exporter))
     return provider
 
+
 @pytest.fixture(scope="function")
 def start_instrumentation(
     tracer_provider,

@@ -38,6 +40,7 @@ def start_instrumentation(
     yield instrumentor
     instrumentor.uninstrument()
 
+
 @pytest.fixture(autouse=True)
 def environment():
     if not os.getenv("OPENAI_API_KEY"):

@@ -48,6 +51,7 @@ def environment():
 def chatOpenAI_client():
     return ChatOpenAI()
 
+
 @pytest.fixture(scope="module")
 def vcr_config():
     return {

@@ -61,10 +65,12 @@ def vcr_config():
         "before_record_response": scrub_response_headers,
     }
 
+
 class LiteralBlockScalar(str):
     """Formats the string as a literal block scalar, preserving whitespace and
     without interpreting escape characters"""
 
+
 def literal_block_scalar_presenter(dumper, data):
     """Represents a scalar string as a literal block, via '|' syntax"""
     return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")

@@ -134,4 +140,3 @@ def scrub_response_headers(response):
     response["headers"]["openai-organization"] = "test_openai_org_id"
     response["headers"]["Set-Cookie"] = "test_set_cookie"
     return response
-
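
Elsewhere in conftest.py (outside the hunks shown) the presenter is presumably registered with PyYAML; the standard pattern would be:

    import yaml

    # Assumed registration line -- not part of the diff shown above.
    yaml.add_representer(LiteralBlockScalar, literal_block_scalar_presenter)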
