Commit 616b89c

Update simulator-interaction-data.md

1 parent 67dee5c commit 616b89c

1 file changed: 11 additions, 11 deletions

articles/ai-foundry/how-to/develop/simulator-interaction-data.md

@@ -123,16 +123,16 @@ async def callback(
     context: Optional[Dict[str, Any]] = None,
 ) -> dict:
     messages_list = messages["messages"]
-    # Get the last message
+    # Get the last message.
     latest_message = messages_list[-1]
     query = latest_message["content"]
     context = latest_message.get("context", None) # looks for context, default None
-    # Call your endpoint or AI application here
+    # Call your endpoint or AI application here:
     current_dir = os.path.dirname(__file__)
     prompty_path = os.path.join(current_dir, "application.prompty")
     _flow = load_flow(source=prompty_path, model={"configuration": azure_ai_project})
     response = _flow(query=query, context=context, conversation_history=messages_list)
-    # Format the response to follow the OpenAI chat protocol
+    # Format the response so that it follows the OpenAI chat protocol.
     formatted_response = {
         "content": response,
         "role": "assistant",
@@ -199,7 +199,7 @@ outputs = await simulator(
     num_queries=4,
     max_conversation_turns=2,
     tasks=tasks,
-    query_response_generating_prompty=query_response_prompty_override # optional, use your own prompt to control how query-response pairs are generated from the input text to be used in your simulator
+    query_response_generating_prompty=query_response_prompty_override # Optional, use your own prompt to control how query-response pairs are generated from the input text to be used in your simulator.
 )

 for output in outputs:
@@ -231,7 +231,7 @@ outputs = await simulator(
 When you incorporate conversation starters, the simulator can handle prespecified repeatable contextually relevant interactions. This capability is useful for simulating the same user turns in a conversation or interaction and evaluating the differences.

 ```python
-conversation_turns = [ # Defines predefined conversation sequences, each starting with a conversation starter.
+conversation_turns = [ # Defines predefined conversation sequences. Each starts with a conversation starter.
     [
         "Hello, how are you?",
         "I want to learn more about Leonardo da Vinci",
@@ -247,7 +247,7 @@ conversation_turns = [ # Defines predefined conversation sequences, each startin
 outputs = await simulator(
     target=callback,
     text=text,
-    conversation_turns=conversation_turns, # optional, ensures the user simulator follows the predefined conversation sequences
+    conversation_turns=conversation_turns, # This is optional. It ensures the user simulator follows the predefined conversation sequences.
     max_conversation_turns=5,
     user_simulator_prompty="user_simulating_application.prompty",
     user_simulator_prompty_kwargs=user_simulator_prompty_kwargs,
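Assembled from the two conversation-starter hunks above, the example reads roughly as follows. The closing brackets of the starter list and any further sequences are elided by the diff, so their placement here is an assumption.

```python
# The two conversation-starter hunks, assembled. The list's closing
# brackets and any additional sequences are assumed; the diff elides them.
conversation_turns = [ # Defines predefined conversation sequences. Each starts with a conversation starter.
    [
        "Hello, how are you?",
        "I want to learn more about Leonardo da Vinci",
    ],
    # ...any further predefined sequences would go here...
]

outputs = await simulator(
    target=callback,
    text=text,
    conversation_turns=conversation_turns, # This is optional. It ensures the user simulator follows the predefined conversation sequences.
    max_conversation_turns=5,
    user_simulator_prompty="user_simulating_application.prompty",
    user_simulator_prompty_kwargs=user_simulator_prompty_kwargs,
)
```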
@@ -287,7 +287,7 @@ with open(output_file, "w") as file:
     for output in outputs:
         file.write(output.to_eval_qr_json_lines())

-# Then you can pass it into our Groundedness evaluator to evaluate it for groundedness
+# Then, you can pass it into our Groundedness evaluator to evaluate it for groundedness:
 groundedness_evaluator = GroundednessEvaluator(model_config=model_config)
 eval_output = evaluate(
     data=output_file,
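As a usage sketch, the write-then-evaluate step around this hunk looks roughly like the following. The import path, the output file name, and the `evaluators` mapping are assumptions; only the lines the hunk shows are confirmed by this commit.

```python
# A sketch of the write-then-evaluate flow; names marked below are assumed.
from azure.ai.evaluation import GroundednessEvaluator, evaluate  # assumed import path

output_file = "simulator_output.jsonl"  # hypothetical file name
with open(output_file, "w") as file:
    for output in outputs:
        file.write(output.to_eval_qr_json_lines())

# Then, you can pass it into our Groundedness evaluator to evaluate it for groundedness:
groundedness_evaluator = GroundednessEvaluator(model_config=model_config)  # model_config defined earlier in the article
eval_output = evaluate(
    data=output_file,
    evaluators={"groundedness": groundedness_evaluator},  # evaluator key is illustrative
)
```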
@@ -335,14 +335,14 @@ async def callback(
     query = messages["messages"][0]["content"]
     context = None

-    # Add file contents for summarization or re-write
+    # Add file contents for summarization or re-write.
     if 'file_content' in messages["template_parameters"]:
         query += messages["template_parameters"]['file_content']

-    # Call your own endpoint and pass your query as input. Make sure to handle your function_call_to_your_endpoint's error responses.
+    # Call your own endpoint and pass your query as input. Make sure to handle the error responses of function_call_to_your_endpoint.
     response = await function_call_to_your_endpoint(query)

-    # Format responses in OpenAI message protocol
+    # Format responses in OpenAI message protocol:
     formatted_response = {
         "content": response,
         "role": "assistant",
@@ -374,7 +374,7 @@ outputs = await adversarial_simulator(
     max_simulation_results=3, #optional
 )

-# By default simulator outputs json, use the following helper function to convert to QA pairs in JSONL format
+# By default, the simulator outputs in JSON format. Use the following helper function to convert to QA pairs in JSONL format:
 print(outputs.to_eval_qa_json_lines())
 ```
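One small usage note on the helper named in the last hunk: since `to_eval_qa_json_lines()` returns the QA pairs as JSONL text, it can be written straight to a file for evaluation. The file name below is hypothetical.

```python
# Usage sketch for the helper above; the output path is hypothetical,
# and outputs comes from the adversarial simulator call in the hunk.
qa_pairs_file = "adversarial_qa_pairs.jsonl"  # hypothetical file name
with open(qa_pairs_file, "w") as f:
    f.write(outputs.to_eval_qa_json_lines())
```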