
Commit f0e8aa0 (1 parent: 93e6a8e)

update response handling in LangChainProvider to improve error logging and ensure structured output is a dictionary. Update metrics handling accordingly.

File tree: 1 file changed (+20, -16 lines)

packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py

Lines changed: 20 additions & 16 deletions

@@ -93,24 +93,28 @@ async def invoke_structured_model(
             structured_llm = self._llm.with_structured_output(response_structure)
             response = await structured_llm.ainvoke(langchain_messages)
 
-            metrics = LDAIMetrics(
-                success=True,
-                usage=TokenUsage(total=0, input=0, output=0),
-            )
-
-            if isinstance(response, dict):
-                raw_response = str(response)
-            else:
-                import json
-                try:
-                    raw_response = json.dumps(response)
-                except (TypeError, ValueError):
-                    raw_response = str(response)
+            if not isinstance(response, dict):
+                if self.logger:
+                    self.logger.warn(
+                        f'Structured output did not return a dict. '
+                        f'Got: {type(response)}'
+                    )
+                return StructuredResponse(
+                    data={},
+                    raw_response='',
+                    metrics=LDAIMetrics(
+                        success=False,
+                        usage=TokenUsage(total=0, input=0, output=0),
+                    ),
+                )
 
             return StructuredResponse(
-                data=response if isinstance(response, dict) else {'result': response},
-                raw_response=raw_response,
-                metrics=metrics,
+                data=response,
+                raw_response=str(response),
+                metrics=LDAIMetrics(
+                    success=True,
+                    usage=TokenUsage(total=0, input=0, output=0),
+                ),
             )
         except Exception as error:
             if self.logger:
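In caller terms, the change means a non-dict structured output is no longer coerced through json.dumps; the provider now logs a warning and returns an empty StructuredResponse whose metrics mark the call as failed. A minimal usage sketch of the new behavior, assuming the method takes the conversation messages and the response schema as arguments (the provider setup, messages, MyResponseSchema, and get_profile are illustrative and not part of this commit):

    # Hypothetical caller; only invoke_structured_model, the StructuredResponse
    # fields, and metrics.success come from the diff above.
    async def get_profile(provider, messages):
        result = await provider.invoke_structured_model(messages, MyResponseSchema)
        if result.metrics.success:
            return result.data          # parsed dict matching the schema
        # After this commit: data == {}, raw_response == '', success is False,
        # and a warning was logged if a logger is configured.
        return None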
