@@ -332,6 +332,38 @@ async def main():
332332 asyncio.run(main())
333333 ```
334334
335+ Args:
336+ schema (dict): The target JSON schema.
336337+ If not provided, use the `data_model` to infer it.
338+ data_model (DataModel | SymbolicDataModel | JsonDataModel): The target data
339+ model for structured output.
340+ language_model (LanguageModel): The language model to use.
341+ prompt_template (str): The jinja2 prompt template.
341342+ examples (list): The default list of examples; the examples
342343+ are a list of tuples containing input/output JSON pairs.
343344+ instructions (str): The default instructions, a string containing
344345+ instructions for the language model.
345346+ final_instructions (str): Optional. The instructions for the final generator
346347+ that produces the structured output. If not provided, the same
347348+ instructions as the tool calls generator are used.
349+ temperature (float): Optional. The temperature for the LM call.
349350+ use_inputs_schema (bool): Optional. Whether or not to use the inputs schema in
350351+ the prompt (Default to False).
351352+ use_outputs_schema (bool): Optional. Whether or not to use the outputs schema in
352353+ the prompt (Default to False).
353354+ reasoning_effort (str): Optional. The reasoning effort for the LM call
355+ between ['minimal', 'low', 'medium', 'high', 'disable', 'none', None].
356+ Default to None (no reasoning).
357+ tools (list): The list of `Tool` or MCP tools available to the agent.
358+ autonomous (bool): Optional. Whether the agent runs autonomously
359+ (executing tools automatically) or in interactive mode where the user
360+ validates tool arguments before execution (Default to True).
361+ return_inputs_with_trajectory (bool): Optional. Whether or not to return the
362+ inputs concatenated with the full message trajectory (Default to True).
363+ max_iterations (int): Optional. The maximum number of tool calling iterations
364+ in autonomous mode (Default to 5). Ignored in interactive mode.
365+ name (str): Optional. The name of the module.
366+ description (str): Optional. The description of the module.
335367 """
336368
337369 def __init__ (
@@ -342,6 +374,7 @@ def __init__(
342374 prompt_template = None ,
343375 examples = None ,
344376 instructions = None ,
377+ final_instructions = None ,
345378 temperature = 0.0 ,
346379 use_inputs_schema = False ,
347380 use_outputs_schema = False ,
@@ -366,6 +399,10 @@ def __init__(
366399 if not instructions :
367400 instructions = get_default_instructions ()
368401 self .instructions = instructions
402+ if not final_instructions :
403+ self .final_instructions = instructions
404+ else :
405+ self .final_instructions = final_instructions
369406 self .temperature = temperature
370407
371408 self .examples = examples
@@ -401,7 +438,7 @@ def __init__(
401438 self .final_generator = Generator (
402439 schema = self .schema ,
403440 language_model = self .language_model ,
404- instructions = self .instructions ,
441+ instructions = self .final_instructions ,
405442 temperature = self .temperature ,
406443 reasoning_effort = self .reasoning_effort ,
407444 return_inputs = False ,
@@ -709,6 +746,7 @@ def get_config(self):
709746 "prompt_template" : self .prompt_template ,
710747 "examples" : self .examples ,
711748 "instructions" : self .instructions ,
749+ "final_instructions" : self .final_instructions ,
712750 "temperature" : self .temperature ,
713751 "use_inputs_schema" : self .use_inputs_schema ,
714752 "use_outputs_schema" : self .use_outputs_schema ,
0 commit comments