Commit 166df20

Update simulator-interaction-data.md
1 parent 581ad56 commit 166df20

1 file changed: +18 −18 lines changed

articles/ai-foundry/how-to/develop/simulator-interaction-data.md

Lines changed: 18 additions & 18 deletions
@@ -58,7 +58,7 @@ from azure.identity import DefaultAzureCredential
 import wikipedia
 import os
 from typing import List, Dict, Any, Optional
-# Prepare the text to send to the simulator
+# Prepare the text to send to the simulator.
 wiki_search_term = "Leonardo da vinci"
 wiki_title = wikipedia.search(wiki_search_term)[0]
 wiki_page = wikipedia.page(wiki_title)
@@ -126,7 +126,7 @@ async def callback(
     # Get the last message.
     latest_message = messages_list[-1]
     query = latest_message["content"]
-    context = latest_message.get("context", None) # looks for context, default None
+    context = latest_message.get("context", None) # Looks for context. The default is None.
     # Call your endpoint or AI application here:
     current_dir = os.path.dirname(__file__)
     prompty_path = os.path.join(current_dir, "application.prompty")
@@ -169,7 +169,7 @@ With the simulator initialized, you can now run it to generate synthetic convers
 outputs = await simulator(
     target=callback,
     text=text,
-    num_queries=1, # Minimal number of queries
+    num_queries=1, # Minimal number of queries.
 )

 ```
@@ -199,7 +199,7 @@ outputs = await simulator(
     num_queries=4,
     max_conversation_turns=2,
     tasks=tasks,
-    query_response_generating_prompty=query_response_prompty_override # Optional, use your own prompt to control how query-response pairs are generated from the input text to be used in your simulator.
+    query_response_generating_prompty=query_response_prompty_override # Optional: Use your own prompt to control how query-response pairs are generated from the input text to be used in your simulator.
 )

 for output in outputs:
@@ -220,9 +220,9 @@ user_simulator_prompty_kwargs = {
 outputs = await simulator(
     target=callback,
     text=text,
-    num_queries=1, # Minimal number of queries
-    user_simulator_prompty="user_simulating_application.prompty", # A prompty which accepts all the following kwargs can be passed to override default user behaviour.
-    user_simulator_prompty_kwargs=user_simulator_prompty_kwargs # Uses a dictionary to override default model parameters such as `temperature` and `top_p`.
+    num_queries=1, # Minimal number of queries.
+    user_simulator_prompty="user_simulating_application.prompty", # A prompty that accepts all the following kwargs can be passed to override the default user behavior.
+    user_simulator_prompty_kwargs=user_simulator_prompty_kwargs # It uses a dictionary to override default model parameters such as `temperature` and `top_p`.
 )
 ```

@@ -295,7 +295,7 @@ eval_output = evaluate(
         "groundedness": groundedness_evaluator
     },
     output_path="groundedness_eval_output.json",
-    azure_ai_project=project_scope # Optional for uploading to your Azure AI Project
+    azure_ai_project=project_scope # This is an optional step used for uploading to your Azure AI Project.
 )
 ```

@@ -339,7 +339,7 @@ async def callback(
     if 'file_content' in messages["template_parameters"]:
         query += messages["template_parameters"]['file_content']

-    # Call your own endpoint and pass your query as input. Make sure to handle the error responses of function_call_to_your_endpoint.
+    # Call your own endpoint and pass your query as input. Make sure to handle the error responses of `function_call_to_your_endpoint`.
     response = await function_call_to_your_endpoint(query)

     # Format responses in OpenAI message protocol:
@@ -368,9 +368,9 @@ scenario = AdversarialScenario.ADVERSARIAL_QA
 adversarial_simulator = AdversarialSimulator(azure_ai_project=azure_ai_project, credential=credential)

 outputs = await adversarial_simulator(
-    scenario=scenario, # required adversarial scenario to simulate
-    target=callback, # callback function to simulate against
-    max_conversation_turns=1, #optional, applicable only to conversation scenario
+    scenario=scenario, # Required: Adversarial scenario to simulate.
+    target=callback, # Callback function to simulate against.
+    max_conversation_turns=1, # Optional: Applicable only to the conversation scenario.
     max_simulation_results=3, #optional
 )

@@ -526,9 +526,9 @@ Usage example:
 from azure.ai.evaluation.simulator import SupportedLanguages

 outputs = await simulator(
-    scenario=scenario, # required, adversarial scenario to simulate
-    target=callback, # required, callback function to simulate against
-    language=SupportedLanguages.Spanish # optional, default english
+    scenario=scenario, # Required: Adversarial scenario to simulate.
+    target=callback, # Required: Callback function to simulate against.
+    language=SupportedLanguages.Spanish # Optional: The default is English.
 )
 ```

@@ -538,9 +538,9 @@ By default, the `AdversarialSimulator` class randomizes interactions in every si

 ```python
 outputs = await simulator(
-    scenario=scenario, # required, adversarial scenario to simulate
-    target=callback, # required, callback function to simulate against
-    randomization_seed=1 # optional
+    scenario=scenario, # Required: Adversarial scenario to simulate.
+    target=callback, # Required: Callback function to simulate against.
+    randomization_seed=1 # Optional.
 )
 ```

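For orientation, here is a minimal sketch of how the adversarial-simulator call annotated in the hunks above fits together. It is assembled from the snippets visible in this diff plus assumed glue that is not part of the commit: the `asyncio` entry point, a toy echo `callback`, and a placeholder `azure_ai_project` value (the real project configuration and callback are defined earlier in the article being edited).

```python
# Minimal sketch assembled from the snippets in this diff; the echo callback and
# the placeholder azure_ai_project value are illustrative assumptions, not part
# of the commit.
import asyncio

from azure.identity import DefaultAzureCredential
from azure.ai.evaluation.simulator import AdversarialSimulator, AdversarialScenario

# Placeholder; use the Azure AI project details configured earlier in the article.
azure_ai_project = "<your Azure AI project configuration>"


async def callback(messages, stream=False, session_state=None, context=None):
    # Toy target: echo the last user message instead of calling a real endpoint.
    messages_list = messages["messages"]
    query = messages_list[-1]["content"]
    messages_list.append({"role": "assistant", "content": f"Echo: {query}"})
    # Return the conversation in the shape the simulator expects.
    return {
        "messages": messages_list,
        "stream": stream,
        "session_state": session_state,
        "context": context,
    }


async def main():
    adversarial_simulator = AdversarialSimulator(
        azure_ai_project=azure_ai_project, credential=DefaultAzureCredential()
    )
    outputs = await adversarial_simulator(
        scenario=AdversarialScenario.ADVERSARIAL_QA,  # Required: Adversarial scenario to simulate.
        target=callback,                              # Callback function to simulate against.
        max_simulation_results=3,                     # Optional.
    )
    print(outputs)


asyncio.run(main())
```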