Commit af291df

Moved model to fixture and changed imports

1 parent 0989748 commit af291df

4 files changed, +28 -28 lines changed

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py

Lines changed: 10 additions & 9 deletions
@@ -12,7 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Dict, List, Optional
+from __future__ import annotations
+
+from typing import Any
 from uuid import UUID
 
 from langchain_core.callbacks import BaseCallbackHandler  # type: ignore
@@ -43,13 +45,13 @@ def __init__(
 
     def on_chat_model_start(
         self,
-        serialized: Dict[str, Any],
+        serialized: dict[str, Any],
         messages: List[List[BaseMessage]],  # type: ignore
         *,
         run_id: UUID,
-        tags: Optional[List[str]] = None,
-        parent_run_id: Optional[UUID] = None,
-        metadata: Optional[Dict[str, Any]] = None,
+        tags: list[str] | None,
+        parent_run_id: UUID | None,
+        metadata: dict[str, Any] | None,
         **kwargs: Any,
     ) -> None:
         invocation_params = kwargs.get("invocation_params")
@@ -91,7 +93,6 @@ def on_chat_model_start(
         span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens)
         provider = metadata.get("ls_provider")
         if provider is not None:
-            # TODO: add to semantic conventions
            span.set_attribute("gen_ai.provider.name", provider)
         temperature = metadata.get("ls_temperature")
         if temperature is not None:
@@ -104,7 +105,7 @@ def on_llm_end(
         response: LLMResult,  # type: ignore
         *,
         run_id: UUID,
-        parent_run_id: Optional[UUID] = None,
+        parent_run_id: UUID | None,
         **kwargs: Any,
     ) -> None:
         span = self.span_manager.get_span(run_id)
@@ -113,7 +114,7 @@ def on_llm_end(
             # If the span does not exist, we cannot set attributes or end it
             return
 
-        finish_reasons: List[str] = []
+        finish_reasons: list[str] = []
         for generation in getattr(response, "generations", []):  # type: ignore
             for chat_generation in generation:
                 generation_info = getattr(
@@ -166,7 +167,7 @@ def on_llm_error(
         error: BaseException,
         *,
         run_id: UUID,
-        parent_run_id: Optional[UUID] = None,
+        parent_run_id: UUID | None,
         **kwargs: Any,
     ) -> None:
         self.span_manager.handle_error(error, run_id)
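The switch from `typing.Dict`/`Optional` to built-in generics and PEP 604 unions is only safe on older interpreters because of the new `from __future__ import annotations` line, which defers annotation evaluation. A minimal standalone sketch of the behavior it enables (the function name is hypothetical, not from this repo):

```python
# Hypothetical demo of deferred annotation evaluation; not part of the commit.
from __future__ import annotations  # all annotations are stored as strings

from typing import Any
from uuid import UUID


def on_start(serialized: dict[str, Any], parent_run_id: UUID | None) -> None:
    # Without the future import, dict[str, Any] raises TypeError on
    # Python < 3.9 and UUID | None raises TypeError on Python < 3.10;
    # with it, neither expression is evaluated at definition or call time.
    print(serialized, parent_run_id)


on_start({"name": "chat"}, None)  # runs on Python 3.7+ thanks to deferral
```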

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/span_manager.py

Lines changed: 2 additions & 1 deletion
@@ -86,7 +86,8 @@ def create_llm_span(
             GenAI.GEN_AI_OPERATION_NAME,
             GenAI.GenAiOperationNameValues.CHAT.value,
         )
-        span.set_attribute(GenAI.GEN_AI_REQUEST_MODEL, request_model)
+        if request_model:
+            span.set_attribute(GenAI.GEN_AI_REQUEST_MODEL, request_model)
 
         return span
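The new `if request_model:` guard matters because the OpenTelemetry Python SDK rejects `None` attribute values, dropping them with a logged warning. A hedged sketch of the pattern, using illustrative names rather than the repo's span manager:

```python
# Illustrative only: mirrors the guard added in create_llm_span.
from __future__ import annotations

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("demo")


def start_chat_span(request_model: str | None):
    span = tracer.start_span("chat")
    # set_attribute with None would be dropped by the SDK with a warning,
    # so only set the attribute when a model name is actually known.
    if request_model:
        span.set_attribute("gen_ai.request.model", request_model)
    return span


start_chat_span(None).end()             # no attribute set, no warning
start_chat_span("gpt-3.5-turbo").end()  # attribute recorded
```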

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/conftest.py

Lines changed: 13 additions & 5 deletions
@@ -14,6 +14,19 @@
     InMemorySpanExporter,
 )
 
+@pytest.fixture(scope="function", name="llm_model")
+def fixture_llm_model():
+    llm = ChatOpenAI(
+        model="gpt-3.5-turbo",
+        temperature=0.1,
+        max_tokens=100,
+        top_p=0.9,
+        frequency_penalty=0.5,
+        presence_penalty=0.5,
+        stop_sequences=["\n", "Human:", "AI:"],
+        seed=100,
+    )
+    yield llm
 
 @pytest.fixture(scope="function", name="span_exporter")
 def fixture_span_exporter():
@@ -47,11 +60,6 @@ def environment():
     os.environ["OPENAI_API_KEY"] = "test_openai_api_key"
 
 
-@pytest.fixture
-def chatOpenAI_client():
-    return ChatOpenAI()
-
-
 @pytest.fixture(scope="module")
 def vcr_config():
     return {
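For context on how the moved fixture is wired up: pytest matches a test's parameter names against fixtures collected from conftest.py, and the `name="llm_model"` alias (rather than the function name `fixture_llm_model`) is what tests request. A minimal self-contained sketch of the pattern, with a plain dict standing in for the real `ChatOpenAI` instance:

```python
# Minimal sketch of the conftest fixture-injection pattern; the dict is a
# stand-in for the real ChatOpenAI instance.
import pytest


@pytest.fixture(scope="function", name="llm_model")
def fixture_llm_model():
    llm = {"model": "gpt-3.5-turbo", "temperature": 0.1}
    yield llm  # scope="function" -> a fresh instance per test


def test_uses_fixture(llm_model):
    # pytest resolves the parameter via the fixture's name= alias.
    assert llm_model["model"] == "gpt-3.5-turbo"
```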

instrumentation-genai/opentelemetry-instrumentation-langchain/tests/test_llm_call.py

Lines changed: 3 additions & 13 deletions
@@ -8,28 +8,18 @@
 from opentelemetry.semconv._incubating.attributes import gen_ai_attributes
 
 
-# span_exporter, chatOpenAI_client, start_instrumentation are coming from fixtures defined in conftest.py
+# span_exporter, start_instrumentation, llm_model are coming from fixtures defined in conftest.py
 @pytest.mark.vcr()
 def test_langchain_call(
-    span_exporter, chatOpenAI_client, start_instrumentation
+    span_exporter, start_instrumentation, llm_model
 ):
-    llm = ChatOpenAI(
-        model="gpt-3.5-turbo",
-        temperature=0.1,
-        max_tokens=100,
-        top_p=0.9,
-        frequency_penalty=0.5,
-        presence_penalty=0.5,
-        stop_sequences=["\n", "Human:", "AI:"],
-        seed=100,
-    )
 
     messages = [
         SystemMessage(content="You are a helpful assistant!"),
        HumanMessage(content="What is the capital of France?"),
     ]
 
-    response = llm.invoke(messages)
+    response = llm_model.invoke(messages)
     assert response.content == "The capital of France is Paris."
 
     # verify spans
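The span verification that follows (truncated in the diff) would typically read finished spans back from the `span_exporter` fixture's in-memory exporter. A hedged sketch of that pattern, not the test's exact assertions:

```python
# Illustrative span-verification pattern with an in-memory exporter.
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
    InMemorySpanExporter,
)

exporter = InMemorySpanExporter()
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(exporter))

tracer = provider.get_tracer("demo")
with tracer.start_as_current_span("chat gpt-3.5-turbo") as span:
    span.set_attribute("gen_ai.request.model", "gpt-3.5-turbo")

# The exporter retains every ended span for the life of the test.
(finished,) = exporter.get_finished_spans()
assert finished.attributes["gen_ai.request.model"] == "gpt-3.5-turbo"
```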
