Commit 85a2041

Revert "fix: Duplicate LLM calls (#981)" (#997)
This reverts commit 63fdd7f.
1 parent f92a115

File tree

1 file changed: +47 -81 lines changed


agentops/instrumentation/crewai/instrumentation.py

Lines changed: 47 additions & 81 deletions
@@ -15,7 +15,6 @@
 from agentops.semconv import SpanAttributes, AgentOpsSpanKindValues, Meters, ToolAttributes, MessageAttributes
 from .crewai_span_attributes import CrewAISpanAttributes, set_span_attribute
 
-
 # Initialize logger
 logger = logging.getLogger(__name__)
 
@@ -411,91 +410,58 @@ def wrap_task_execute(
 def wrap_llm_call(
     tracer, duration_histogram, token_histogram, environment, application_name, wrapped, instance, args, kwargs
 ):
-    try:
-        llm = instance.model if hasattr(instance, "model") else "llm"
-        # To get the model provider (e.g. "openai") or the model name (e.g. "gpt-4o-mini")
-        provider = llm.split("/")[0] if "/" in llm else llm.split("-")[0]
-
-        provider_instrumentor = {
-            "gpt": "OpenAIInstrumentor",
-            "openai": "OpenAIInstrumentor",
-            "claude": "AnthropicInstrumentor",
-            "anthropic": "AnthropicInstrumentor",
-            "google": "GoogleGenerativeAIInstrumentor",
-            "gemini": "GoogleGenerativeAIInstrumentor",
-            "ibm": "IBMWatsonXInstrumentor",
-            "watsonx": "IBMWatsonXInstrumentor",
-            "agents": "OpenAIAgentsInstrumentor",
-        }
-
-        instrumentor = provider_instrumentor.get(provider.lower())
-
-        if instrumentor:
-            logger.debug(f"Skipping instrumentation for CrewAI LLM call for {provider} and using {instrumentor}")
-            result = wrapped(*args, **kwargs)
-            return result
-        else:
-            logger.debug(f"Instrumenting CrewAI LLM call for provider: {provider}")
-            with tracer.start_as_current_span(f"{llm}.llm", kind=SpanKind.CLIENT, attributes={}) as span:
-                start_time = time.time()
-                try:
-                    span.set_attribute(TELEMETRY_SDK_NAME, "agentops")
-                    span.set_attribute(SERVICE_NAME, application_name)
-                    span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+    llm = instance.model if hasattr(instance, "model") else "llm"
+    with tracer.start_as_current_span(f"{llm}.llm", kind=SpanKind.CLIENT, attributes={}) as span:
+        start_time = time.time()
+        try:
+            span.set_attribute(TELEMETRY_SDK_NAME, "agentops")
+            span.set_attribute(SERVICE_NAME, application_name)
+            span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
 
-                    CrewAISpanAttributes(span=span, instance=instance)
+            CrewAISpanAttributes(span=span, instance=instance)
 
-                    result = wrapped(*args, **kwargs)
+            result = wrapped(*args, **kwargs)
 
-                    # Set prompt attributes from args
-                    if args and isinstance(args[0], list):
-                        for i, message in enumerate(args[0]):
-                            if isinstance(message, dict):
-                                if "role" in message:
-                                    span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), message["role"])
-                                if "content" in message:
-                                    span.set_attribute(MessageAttributes.PROMPT_CONTENT.format(i=i), message["content"])
-
-                    # Set completion attributes from result
-                    if result:
-                        span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), str(result))
-                        span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), "assistant")
-
-                    # Set token usage attributes from callbacks
-                    if (
-                        "callbacks" in kwargs
-                        and kwargs["callbacks"]
-                        and hasattr(kwargs["callbacks"][0], "token_cost_process")
-                    ):
-                        token_process = kwargs["callbacks"][0].token_cost_process
-                        if hasattr(token_process, "completion_tokens"):
-                            span.set_attribute(
-                                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, token_process.completion_tokens
-                            )
-                        if hasattr(token_process, "prompt_tokens"):
-                            span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, token_process.prompt_tokens)
-                        if hasattr(token_process, "total_tokens"):
-                            span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, token_process.total_tokens)
+            # Set prompt attributes from args
+            if args and isinstance(args[0], list):
+                for i, message in enumerate(args[0]):
+                    if isinstance(message, dict):
+                        if "role" in message:
+                            span.set_attribute(MessageAttributes.PROMPT_ROLE.format(i=i), message["role"])
+                        if "content" in message:
+                            span.set_attribute(MessageAttributes.PROMPT_CONTENT.format(i=i), message["content"])
 
-                    if duration_histogram:
-                        duration = time.time() - start_time
-                        duration_histogram.record(
-                            duration,
-                            attributes={
-                                SpanAttributes.LLM_SYSTEM: "crewai",
-                                SpanAttributes.LLM_RESPONSE_MODEL: str(instance.model),
-                            },
-                        )
+            # Set completion attributes from result
+            if result:
+                span.set_attribute(MessageAttributes.COMPLETION_CONTENT.format(i=0), str(result))
+                span.set_attribute(MessageAttributes.COMPLETION_ROLE.format(i=0), "assistant")
+
+            # Set token usage attributes from callbacks
+            if "callbacks" in kwargs and kwargs["callbacks"] and hasattr(kwargs["callbacks"][0], "token_cost_process"):
+                token_process = kwargs["callbacks"][0].token_cost_process
+                if hasattr(token_process, "completion_tokens"):
+                    span.set_attribute(SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, token_process.completion_tokens)
+                if hasattr(token_process, "prompt_tokens"):
+                    span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, token_process.prompt_tokens)
+                if hasattr(token_process, "total_tokens"):
+                    span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, token_process.total_tokens)
+
+            if duration_histogram:
+                duration = time.time() - start_time
+                duration_histogram.record(
+                    duration,
+                    attributes={
+                        SpanAttributes.LLM_SYSTEM: "crewai",
+                        SpanAttributes.LLM_RESPONSE_MODEL: str(instance.model),
+                    },
+                )
 
-                    span.set_status(Status(StatusCode.OK))
-                    return result
-                except Exception as e:
-                    span.set_status(Status(StatusCode.ERROR, str(e)))
-                    logger.error("Error in trace creation: %s", e)
-                    raise e
-    except Exception as e:
-        logger.error(f"Error in provider detection: {e}")
-        raise e
+            span.set_status(Status(StatusCode.OK))
+            return result
+        except Exception as ex:
+            span.set_status(Status(StatusCode.ERROR, str(ex)))
+            logger.error("Error in trace creation: %s", ex)
+            raise
 
 
 def wrap_tool_execution(tracer, duration_histogram, environment, application_name):
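
Note on the behavior being backed out: the deleted lines keyed provider detection off a prefix of the model string and skipped CrewAI's own span whenever a dedicated instrumentor was mapped for that provider. The snippet below is only a standalone sketch of that (now reverted) lookup; the example model identifiers are illustrative assumptions, not values taken from this diff.

# Standalone sketch of the provider lookup removed by this revert.
# The example model strings below are assumptions, not taken from the diff.
from typing import Optional

PROVIDER_INSTRUMENTOR = {
    "gpt": "OpenAIInstrumentor",
    "openai": "OpenAIInstrumentor",
    "claude": "AnthropicInstrumentor",
    "anthropic": "AnthropicInstrumentor",
    "google": "GoogleGenerativeAIInstrumentor",
    "gemini": "GoogleGenerativeAIInstrumentor",
    "ibm": "IBMWatsonXInstrumentor",
    "watsonx": "IBMWatsonXInstrumentor",
    "agents": "OpenAIAgentsInstrumentor",
}


def detect_instrumentor(llm: str) -> Optional[str]:
    # Prefix before "/" (e.g. "openai/gpt-4o-mini" -> "openai"), otherwise
    # the first "-"-separated token (e.g. "gpt-4o-mini" -> "gpt").
    provider = llm.split("/")[0] if "/" in llm else llm.split("-")[0]
    return PROVIDER_INSTRUMENTOR.get(provider.lower())


print(detect_instrumentor("openai/gpt-4o-mini"))  # OpenAIInstrumentor -> #981 skipped the CrewAI span
print(detect_instrumentor("claude-3-5-sonnet"))   # AnthropicInstrumentor -> #981 skipped the CrewAI span
print(detect_instrumentor("ollama/llama3"))       # None -> CrewAI created its own span

After this revert, the CrewAI span is created unconditionally again, regardless of provider.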

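How a wrapper with this signature gets attached is not shown in this diff. The sketch below is a rough illustration only, assuming the wrapt-based registration pattern common to OpenTelemetry-style instrumentors; the target module and method name ("crewai.llm" / "LLM.call") and the functools.partial binding are assumptions, and only wrap_llm_call's signature comes from the diff above.

# Hypothetical registration sketch -- not from this commit.
from functools import partial

from wrapt import wrap_function_wrapper

from agentops.instrumentation.crewai.instrumentation import wrap_llm_call


def register_llm_wrapper(tracer, duration_histogram, token_histogram, environment, application_name):
    # wrapt invokes the wrapper as wrapper(wrapped, instance, args, kwargs);
    # partial() pre-binds the five instrumentation dependencies so the call
    # matches wrap_llm_call's full signature.
    wrap_function_wrapper(
        "crewai.llm",  # assumed module holding the LLM class
        "LLM.call",    # assumed method that issues the model request
        partial(wrap_llm_call, tracer, duration_histogram, token_histogram, environment, application_name),
    )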