You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
File d:\LangGraph\langgraph-handson\Lib\site-packages\groq\resources\chat\completions.py:368, in Completions.create(self, messages, model, exclude_domains, frequency_penalty, function_call, functions, include_domains, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, n, parallel_tool_calls, presence_penalty, reasoning_effort, reasoning_format, response_format, search_settings, seed, service_tier, stop, store, stream, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
181 def create(
182 self,
183 *,
(...) 229 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
230 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
231 """
232 Creates a model response for the given chat conversation.
233
(...) 366 timeout: Override the client-level default timeout for this request, in seconds
367 """
--> 368 return self._post(
369 "/openai/v1/chat/completions",
370 body=maybe_transform(
371 {
372 "messages": messages,
...
-> 1034 raise self._make_status_error_from_response(err.response) from None
1036 break
1038 assert response is not None, "could not resolve response (should never happen)"
BadRequestError: Error code: 400 - {'error': {'message': "Failed to call a function. Please adjust your prompt. See 'failed_generation' for more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '\n{\n "tool_calls": [\n {\n "id": "pending",\n "type": "function",\n "function": {\n "name": "define_ai"\n },\n "parameters": {\n "description": "A type of artificial intelligence that enables machines to learn, reason, and act like humans."\n }\n }\n ]\n}\n'}}
Output is truncated. View as a scrollable element or open in a text editor. Adjust cell output settings...
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
Uh oh!
There was an error while loading. Please reload this page.
-
from langchain.chat_models import init_chat_model
from langchain_core.tools import tool
# Fix: ToolNode was used below but never imported.
from langgraph.prebuilt import ToolNode

# Initialize the Groq-hosted Llama 3 8B chat model.
llm = init_chat_model("groq:llama3-8b-8192")
llm


@tool
def add(a: float, b: float) -> float:
    """Add two numbers and return their sum."""
    return a + b


tools = [add]
tool_node = ToolNode(tools)
# Fix: the original called `llms.bind_tools(...)` but no name `llms` exists —
# the model object is `llm`. Binding to the correct object attaches the tool
# schema so the model can emit well-formed tool calls.
llm_with_tool = llm.bind_tools(tools)
And if I invoke the tool-bound model with a plain question:
llm_with_tool.invoke("What is machine learning?")
it raises a BadRequestError:
BadRequestError Traceback (most recent call last)
Cell In[8], line 1
----> 1 llm_with_tool.invoke("What is AI?")
File d:\LangGraph\langgraph-handson\Lib\site-packages\langchain_core\runnables\base.py:5431, in RunnableBindingBase.invoke(self, input, config, **kwargs)
5424 @OverRide
5425 def invoke(
5426 self,
(...) 5429 **kwargs: Optional[Any],
5430 ) -> Output:
-> 5431 return self.bound.invoke(
5432 input,
5433 self._merge_configs(config),
5434 **{**self.kwargs, **kwargs},
5435 )
File d:\LangGraph\langgraph-handson\Lib\site-packages\langchain_core\language_models\chat_models.py:378, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
366 @OverRide
367 def invoke(
368 self,
(...) 373 **kwargs: Any,
374 ) -> BaseMessage:
375 config = ensure_config(config)
376 return cast(
377 "ChatGeneration",
--> 378 self.generate_prompt(
379 [self._convert_input(input)],
380 stop=stop,
381 callbacks=config.get("callbacks"),
382 tags=config.get("tags"),
383 metadata=config.get("metadata"),
384 run_name=config.get("run_name"),
385 run_id=config.pop("run_id", None),
386 **kwargs,
387 ).generations[0][0],
388 ).message
File d:\LangGraph\langgraph-handson\Lib\site-packages\langchain_core\language_models\chat_models.py:963, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
954 @OverRide
955 def generate_prompt(
956 self,
(...) 960 **kwargs: Any,
961 ) -> LLMResult:
962 prompt_messages = [p.to_messages() for p in prompts]
--> 963 return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)
File d:\LangGraph\langgraph-handson\Lib\site-packages\langchain_core\language_models\chat_models.py:782, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
779 for i, m in enumerate(input_messages):
780 try:
781 results.append(
--> 782 self._generate_with_cache(
783 m,
784 stop=stop,
785 run_manager=run_managers[i] if run_managers else None,
786 **kwargs,
787 )
788 )
789 except BaseException as e:
790 if run_managers:
File d:\LangGraph\langgraph-handson\Lib\site-packages\langchain_core\language_models\chat_models.py:1028, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
1026 result = generate_from_stream(iter(chunks))
1027 elif inspect.signature(self._generate).parameters.get("run_manager"):
-> 1028 result = self._generate(
1029 messages, stop=stop, run_manager=run_manager, **kwargs
1030 )
1031 else:
1032 result = self._generate(messages, stop=stop, **kwargs)
File d:\LangGraph\langgraph-handson\Lib\site-packages\langchain_groq\chat_models.py:536, in ChatGroq._generate(self, messages, stop, run_manager, **kwargs)
531 message_dicts, params = self._create_message_dicts(messages, stop)
532 params = {
533 **params,
534 **kwargs,
535 }
--> 536 response = self.client.create(messages=message_dicts, **params)
537 return self._create_chat_result(response)
File d:\LangGraph\langgraph-handson\Lib\site-packages\groq\resources\chat\completions.py:368, in Completions.create(self, messages, model, exclude_domains, frequency_penalty, function_call, functions, include_domains, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, n, parallel_tool_calls, presence_penalty, reasoning_effort, reasoning_format, response_format, search_settings, seed, service_tier, stop, store, stream, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
181 def create(
182 self,
183 *,
(...) 229 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
230 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
231 """
232 Creates a model response for the given chat conversation.
233
(...) 366 timeout: Override the client-level default timeout for this request, in seconds
367 """
--> 368 return self._post(
369 "/openai/v1/chat/completions",
370 body=maybe_transform(
371 {
372 "messages": messages,
...
-> 1034 raise self._make_status_error_from_response(err.response) from None
1036 break
1038 assert response is not None, "could not resolve response (should never happen)"
BadRequestError: Error code: 400 - {'error': {'message': "Failed to call a function. Please adjust your prompt. See 'failed_generation' for more details.", 'type': 'invalid_request_error', 'code': 'tool_use_failed', 'failed_generation': '\n{\n "tool_calls": [\n {\n "id": "pending",\n "type": "function",\n "function": {\n "name": "define_ai"\n },\n "parameters": {\n "description": "A type of artificial intelligence that enables machines to learn, reason, and act like humans."\n }\n }\n ]\n}\n'}}
Output is truncated. View as a scrollable element or open in a text editor. Adjust cell output settings...
Beta Was this translation helpful? Give feedback.
All reactions