Skip to content

Commit 8dcdcce

Browse files
Release 2.1.24 (#220)

* Remove logs
* Remove requirements
* Bump version
* Account for NOT_GIVEN sentinel type (#219)
  * Account for NOT_GIVEN sentinel type
  * Bump version
1 parent a130443 commit 8dcdcce

File tree

2 files changed: +18 −17 lines

src/langtrace_python_sdk/instrumentation/openai/patch.py

Lines changed: 17 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
)
3030
from langtrace_python_sdk.constants.instrumentation.openai import APIS
3131
from langtrace_python_sdk.utils.llm import calculate_prompt_tokens, estimate_tokens
32+
from openai._types import NOT_GIVEN
3233

3334

3435
def images_generate(original_method, version, tracer):
@@ -470,16 +471,16 @@ def traced_method(wrapped, instance, args, kwargs):
470471
attributes = LLMSpanAttributes(**span_attributes)
471472

472473
tools = []
473-
if kwargs.get("temperature") is not None:
474+
if kwargs.get("temperature") is not None and kwargs.get("temperature") != NOT_GIVEN:
474475
attributes.llm_temperature = kwargs.get("temperature")
475-
if kwargs.get("top_p") is not None:
476+
if kwargs.get("top_p") is not None and kwargs.get("top_p") != NOT_GIVEN:
476477
attributes.llm_top_p = kwargs.get("top_p")
477-
if kwargs.get("user") is not None:
478+
if kwargs.get("user") is not None and kwargs.get("user") != NOT_GIVEN:
478479
attributes.llm_user = kwargs.get("user")
479-
if kwargs.get("functions") is not None:
480+
if kwargs.get("functions") is not None and kwargs.get("functions") != NOT_GIVEN:
480481
for function in kwargs.get("functions"):
481482
tools.append(json.dumps({"type": "function", "function": function}))
482-
if kwargs.get("tools") is not None:
483+
if kwargs.get("tools") is not None and kwargs.get("tools") != NOT_GIVEN:
483484
tools.append(json.dumps(kwargs.get("tools")))
484485
if len(tools) > 0:
485486
attributes.llm_tools = json.dumps(tools)
@@ -498,7 +499,7 @@ def traced_method(wrapped, instance, args, kwargs):
498499
try:
499500
# Attempt to call the original method
500501
result = wrapped(*args, **kwargs)
501-
if kwargs.get("stream") is False or kwargs.get("stream") is None:
502+
if kwargs.get("stream") is False or kwargs.get("stream") is None or kwargs.get("stream") == NOT_GIVEN:
502503
span.set_attribute("llm.model", result.model)
503504
if hasattr(result, "choices") and result.choices is not None:
504505
responses = [
@@ -527,7 +528,7 @@ def traced_method(wrapped, instance, args, kwargs):
527528
span.set_attribute("llm.responses", json.dumps(responses))
528529
if (
529530
hasattr(result, "system_fingerprint")
530-
and result.system_fingerprint is not None
531+
and result.system_fingerprint is not None and result.system_fingerprint != NOT_GIVEN
531532
):
532533
span.set_attribute(
533534
"llm.system.fingerprint", result.system_fingerprint
@@ -554,7 +555,7 @@ def traced_method(wrapped, instance, args, kwargs):
554555
)
555556

556557
# iterate over kwargs.get("functions") and calculate the prompt tokens
557-
if kwargs.get("functions") is not None:
558+
if kwargs.get("functions") is not None and kwargs.get("functions") != NOT_GIVEN:
558559
for function in kwargs.get("functions"):
559560
prompt_tokens += calculate_prompt_tokens(
560561
json.dumps(function), kwargs.get("model")
@@ -640,16 +641,16 @@ async def traced_method(wrapped, instance, args, kwargs):
640641
attributes = LLMSpanAttributes(**span_attributes)
641642

642643
tools = []
643-
if kwargs.get("temperature") is not None:
644+
if kwargs.get("temperature") is not None and kwargs.get("temperature") != NOT_GIVEN:
644645
attributes.llm_temperature = kwargs.get("temperature")
645-
if kwargs.get("top_p") is not None:
646+
if kwargs.get("top_p") is not None and kwargs.get("top_p") != NOT_GIVEN:
646647
attributes.llm_top_p = kwargs.get("top_p")
647-
if kwargs.get("user") is not None:
648+
if kwargs.get("user") is not None and kwargs.get("user") != NOT_GIVEN:
648649
attributes.llm_user = kwargs.get("user")
649-
if kwargs.get("functions") is not None:
650+
if kwargs.get("functions") is not None and kwargs.get("functions") != NOT_GIVEN:
650651
for function in kwargs.get("functions"):
651652
tools.append(json.dumps({"type": "function", "function": function}))
652-
if kwargs.get("tools") is not None:
653+
if kwargs.get("tools") is not None and kwargs.get("tools") != NOT_GIVEN:
653654
tools.append(json.dumps(kwargs.get("tools")))
654655
if len(tools) > 0:
655656
attributes.llm_tools = json.dumps(tools)
@@ -666,7 +667,7 @@ async def traced_method(wrapped, instance, args, kwargs):
666667
try:
667668
# Attempt to call the original method
668669
result = await wrapped(*args, **kwargs)
669-
if kwargs.get("stream") is False or kwargs.get("stream") is None:
670+
if kwargs.get("stream") is False or kwargs.get("stream") is None or kwargs.get("stream") == NOT_GIVEN:
670671
span.set_attribute("llm.model", result.model)
671672
if hasattr(result, "choices") and result.choices is not None:
672673
responses = [
@@ -695,7 +696,7 @@ async def traced_method(wrapped, instance, args, kwargs):
695696
span.set_attribute("llm.responses", json.dumps(responses))
696697
if (
697698
hasattr(result, "system_fingerprint")
698-
and result.system_fingerprint is not None
699+
and result.system_fingerprint is not None and result.system_fingerprint != NOT_GIVEN
699700
):
700701
span.set_attribute(
701702
"llm.system.fingerprint", result.system_fingerprint
@@ -722,7 +723,7 @@ async def traced_method(wrapped, instance, args, kwargs):
722723
)
723724

724725
# iterate over kwargs.get("functions") and calculate the prompt tokens
725-
if kwargs.get("functions") is not None:
726+
if kwargs.get("functions") is not None and kwargs.get("functions") != NOT_GIVEN:
726727
for function in kwargs.get("functions"):
727728
prompt_tokens += calculate_prompt_tokens(
728729
json.dumps(function), kwargs.get("model")
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "2.1.23"
1+
__version__ = "2.1.24"

0 commit comments

Comments (0)