Skip to content

Commit 93e6a8e

Browse files
committed
remove extra comments.
1 parent 5b401a5 commit 93e6a8e

File tree

1 file changed: +1 addition, −22 deletions

packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py

Lines changed: 1 addition & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -48,34 +48,23 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse:
4848
:return: ChatResponse containing the model's response and metrics
4949
"""
5050
try:
51-
# Convert LDMessage[] to LangChain messages
5251
langchain_messages = LangChainProvider.convert_messages_to_langchain(messages)
53-
54-
# Get the LangChain response
5552
response: BaseMessage = await self._llm.ainvoke(langchain_messages)
56-
57-
# Generate metrics early (assumes success by default)
5853
metrics = LangChainProvider.get_ai_metrics_from_response(response)
5954

60-
# Extract text content from the response
6155
content: str = ''
6256
if isinstance(response.content, str):
6357
content = response.content
6458
else:
65-
# Log warning for non-string content (likely multimodal)
6659
if self.logger:
6760
self.logger.warn(
6861
f'Multimodal response not supported, expecting a string. '
6962
f'Content type: {type(response.content)}, Content: {response.content}'
7063
)
71-
# Update metrics to reflect content loss
7264
metrics = LDAIMetrics(success=False, usage=metrics.usage)
7365

74-
# Create the assistant message
75-
assistant_message = LDMessage(role='assistant', content=content)
76-
7766
return ChatResponse(
78-
message=assistant_message,
67+
message=LDMessage(role='assistant', content=content),
7968
metrics=metrics,
8069
)
8170
except Exception as error:
@@ -100,20 +89,15 @@ async def invoke_structured_model(
10089
:return: StructuredResponse containing the structured data
10190
"""
10291
try:
103-
# Convert LDMessage[] to LangChain messages
10492
langchain_messages = LangChainProvider.convert_messages_to_langchain(messages)
105-
106-
# Get the LangChain response with structured output
10793
structured_llm = self._llm.with_structured_output(response_structure)
10894
response = await structured_llm.ainvoke(langchain_messages)
10995

110-
# Using structured output doesn't support metrics
11196
metrics = LDAIMetrics(
11297
success=True,
11398
usage=TokenUsage(total=0, input=0, output=0),
11499
)
115100

116-
# Handle response serialization
117101
if isinstance(response, dict):
118102
raw_response = str(response)
119103
else:
@@ -149,10 +133,6 @@ def get_chat_model(self) -> BaseChatModel:
149133
"""
150134
return self._llm
151135

152-
# =============================================================================
153-
# STATIC UTILITY METHODS
154-
# =============================================================================
155-
156136
@staticmethod
157137
def map_provider(ld_provider_name: str) -> str:
158138
"""
@@ -201,7 +181,6 @@ def get_ai_metrics_from_response(response: BaseMessage) -> LDAIMetrics:
201181
output=token_usage.get('completionTokens', 0) or token_usage.get('completion_tokens', 0),
202182
)
203183

204-
# LangChain responses that complete successfully are considered successful by default
205184
return LDAIMetrics(success=True, usage=usage)
206185

207186
@staticmethod

Comments (0)