Commit 52a1237

remove logs
1 parent: 0fbdb10

2 files changed: +6, -83 lines

2 files changed

+6
-83
lines changed

sentry_sdk/consts.py

Lines changed: 1 addition & 0 deletions
@@ -806,6 +806,7 @@ class OP:
     GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
     GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
     GEN_AI_HANDOFF = "gen_ai.handoff"
+    GEN_AI_PIPELINE = "gen_ai.pipeline"
     GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
     GEN_AI_RESPONSES = "gen_ai.responses"
     GRAPHQL_EXECUTE = "graphql.execute"
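
For context, a minimal sketch (not part of this commit) of how an OP constant like the new GEN_AI_PIPELINE is typically consumed through the public sentry_sdk.start_span API; the chain object and the span name below are illustrative assumptions, and the integration itself creates spans through its own helpers:

import sentry_sdk
from sentry_sdk.consts import OP

def run_pipeline(chain, inputs):
    # Group the whole chain run under a single "gen_ai.pipeline" span.
    with sentry_sdk.start_span(op=OP.GEN_AI_PIPELINE, name="Chain execution"):
        return chain.invoke(inputs)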

sentry_sdk/integrations/langchain.py

Lines changed: 5 additions & 83 deletions
@@ -302,33 +302,14 @@ def on_chat_model_end(self, response, *, run_id, **kwargs):
             # Reference: https://python.langchain.com/docs/how_to/llm_token_usage_tracking/
             token_usage = None
 
-            # Debug: Log the response structure to understand what's available
-            logger.debug(
-                "LangChain response structure: llm_output=%s, has_usage=%s",
-                bool(response.llm_output),
-                hasattr(response, "usage"),
-            )
-
             if response.llm_output and "token_usage" in response.llm_output:
                 token_usage = response.llm_output["token_usage"]
-                logger.debug("Found token_usage in llm_output dict: %s", token_usage)
             elif response.llm_output and hasattr(response.llm_output, "token_usage"):
                 token_usage = response.llm_output.token_usage
-                logger.debug(
-                    "Found token_usage as llm_output attribute: %s", token_usage
-                )
             elif hasattr(response, "usage"):
-                # Some models might have usage directly on the response (OpenAI-style)
                 token_usage = response.usage
-                logger.debug("Found usage on response: %s", token_usage)
             elif hasattr(response, "token_usage"):
-                # Direct token_usage attribute
                 token_usage = response.token_usage
-                logger.debug("Found token_usage on response: %s", token_usage)
-            else:
-                logger.debug(
-                    "No token usage found in response, will use manual counting"
-                )
 
             span_data = self.span_map[run_id]
             if not span_data:
@@ -384,12 +365,7 @@ def on_chat_model_end(self, response, *, run_id, **kwargs):
 
     def on_llm_new_token(self, token, *, run_id, **kwargs):
         # type: (SentryLangchainCallback, str, UUID, Any) -> Any
-        """Run on new LLM token. Only available when streaming is enabled.
-
-        Note: LangChain documentation mentions that streaming token counts
-        may not be fully supported for all models. This provides a fallback
-        for manual counting during streaming.
-        """
+        """Run on new LLM token. Only available when streaming is enabled."""
         with capture_internal_exceptions():
             if not run_id or run_id not in self.span_map:
                 return
@@ -399,11 +375,6 @@ def on_llm_new_token(self, token, *, run_id, **kwargs):
             # Count tokens for each streaming chunk
             token_count = self.count_tokens(token)
             span_data.num_completion_tokens += token_count
-            logger.debug(
-                "Streaming token count updated: +%s (total: %s)",
-                token_count,
-                span_data.num_completion_tokens,
-            )
 
     def on_llm_end(self, response, *, run_id, **kwargs):
         # type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any
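
After removing the debug log, on_llm_new_token keeps only the accumulation: each streamed chunk is counted and added to the running completion total. A minimal standalone sketch of that pattern, where count_tokens stands in for whatever token counter the integration wires in (the whitespace split below is only a placeholder, not this commit's code):

class StreamingTokenCounter:
    """Accumulates completion tokens across streamed chunks."""

    def __init__(self, count_tokens):
        self.count_tokens = count_tokens  # callable: str -> int
        self.num_completion_tokens = 0

    def on_new_token(self, token):
        # Mirrors the retained logic: count each chunk and add it to the total.
        self.num_completion_tokens += self.count_tokens(token)


counter = StreamingTokenCounter(lambda text: len(text.split()))
for chunk in ["The", " quick", " brown", " fox"]:
    counter.on_new_token(chunk)
# counter.num_completion_tokens == 4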
@@ -412,37 +383,18 @@ def on_llm_end(self, response, *, run_id, **kwargs):
             if not run_id:
                 return
 
-            # Extract token usage following LangChain's callback pattern
-            # Reference: https://python.langchain.com/docs/how_to/llm_token_usage_tracking/
             token_usage = None
-
-            # Debug: Log the response structure to understand what's available
-            logger.debug(
-                "LangChain response structure: llm_output=%s, has_usage=%s",
-                bool(response.llm_output),
-                hasattr(response, "usage"),
-            )
-
             if response.llm_output and "token_usage" in response.llm_output:
                 token_usage = response.llm_output["token_usage"]
-                logger.debug("Found token_usage in llm_output dict: %s", token_usage)
+
             elif response.llm_output and hasattr(response.llm_output, "token_usage"):
                 token_usage = response.llm_output.token_usage
-                logger.debug(
-                    "Found token_usage as llm_output attribute: %s", token_usage
-                )
+
             elif hasattr(response, "usage"):
-                # Some models might have usage directly on the response (OpenAI-style)
                 token_usage = response.usage
-                logger.debug("Found usage on response: %s", token_usage)
+
             elif hasattr(response, "token_usage"):
-                # Direct token_usage attribute
                 token_usage = response.token_usage
-                logger.debug("Found token_usage on response: %s", token_usage)
-            else:
-                logger.debug(
-                    "No token usage found in response, will use manual counting"
-                )
 
             span_data = self.span_map[run_id]
             if not span_data:
@@ -460,26 +412,13 @@ def on_llm_end(self, response, *, run_id, **kwargs):
                 input_tokens, output_tokens, total_tokens = (
                     self._extract_token_usage(token_usage)
                 )
-                # Log token usage for debugging (will be removed in production)
-                logger.debug(
-                    "LangChain token usage found: input=%s, output=%s, total=%s",
-                    input_tokens,
-                    output_tokens,
-                    total_tokens,
-                )
                 record_token_usage(
                     span_data.span,
                     input_tokens=input_tokens,
                     output_tokens=output_tokens,
                     total_tokens=total_tokens,
                 )
             else:
-                # Fallback to manual token counting when no usage info is available
-                logger.debug(
-                    "No token usage from LangChain, using manual count: input=%s, output=%s",
-                    span_data.num_prompt_tokens,
-                    span_data.num_completion_tokens,
-                )
                 record_token_usage(
                     span_data.span,
                     input_tokens=(
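
With the logging stripped out, on_llm_end reduces to a plain preference order: use usage reported by LangChain (token_usage in or on llm_output, or usage/token_usage on the response), otherwise fall back to the manually counted streaming totals before calling record_token_usage. A condensed sketch of that decision; it assumes a dict-shaped usage payload, whereas the real code normalizes dict and attribute shapes via _extract_token_usage:

def resolve_token_usage(response, num_prompt_tokens, num_completion_tokens):
    """Return (input, output, total) token counts, preferring reported usage."""
    token_usage = None
    llm_output = getattr(response, "llm_output", None)

    if llm_output and "token_usage" in llm_output:
        token_usage = llm_output["token_usage"]
    elif llm_output and hasattr(llm_output, "token_usage"):
        token_usage = llm_output.token_usage
    elif hasattr(response, "usage"):
        token_usage = response.usage
    elif hasattr(response, "token_usage"):
        token_usage = response.token_usage

    if token_usage:
        # Reported usage wins (assumed dict-shaped in this sketch).
        return (
            token_usage.get("prompt_tokens"),
            token_usage.get("completion_tokens"),
            token_usage.get("total_tokens"),
        )
    # Otherwise fall back to the counts accumulated while streaming.
    return (num_prompt_tokens or None, num_completion_tokens or None, None)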
@@ -511,24 +450,7 @@ def on_chat_model_error(self, error, *, run_id, **kwargs):
     def on_chain_start(self, serialized, inputs, *, run_id, **kwargs):
         # type: (SentryLangchainCallback, Dict[str, Any], Dict[str, Any], UUID, Any) -> Any
         """Run when chain starts running."""
-        with capture_internal_exceptions():
-            if not run_id:
-                return
-            """watched_span = self._create_span(
-                run_id,
-                kwargs.get("parent_run_id"),
-                # not sure about this one - it kinda spams the UI with a lot of spans
-                op=OP.GEN_AI_PIPELINE,
-                name=kwargs.get("name") or "Chain execution",
-                origin=LangchainIntegration.origin
-            )
-            watched_span.span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, kwargs.get("model", kwargs.get("model_name", kwargs.get("model_id"))))
-            metadata = kwargs.get("metadata")
-            if metadata:
-                set_data_normalized(
-                    watched_span.span, SPANDATA.GEN_AI_REQUEST_METADATA, metadata
-                )
-            """
+        pass
 
     def on_chain_end(self, outputs, *, run_id, **kwargs):
         # type: (SentryLangchainCallback, Dict[str, Any], UUID, Any) -> Any
