Skip to content

Commit 5a6cfbf

Browse files
authored
Improve function types and address PM requirements (Azure#38356)
* PM comment: remove leading empty lines and spaces for string prompt template * Improve types for complete function * Fix pylint error * Fix types for messages * Update release log and docstring * Fix linter issue
1 parent 28f4ec6 commit 5a6cfbf

File tree

7 files changed

+73
-11
lines changed

7 files changed

+73
-11
lines changed

sdk/ai/azure-ai-inference/CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
* Method `AIInferenceInstrumentor().instrument()` updated with an input argument `enable_content_recording`.
99
* Calling `AIInferenceInstrumentor().instrument()` twice no longer results in an exception.
1010
* Added method `AIInferenceInstrumentor().is_content_recording_enabled()`
11+
* Support [Prompty](https://github.com/microsoft/prompty) and prompt template from string. The PromptTemplate class outputs an array of dictionaries in OpenAI-compatible message format.
1112

1213
### Bugs Fixed
1314

sdk/ai/azure-ai-inference/azure/ai/inference/_patch.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -265,7 +265,7 @@ def __init__(
265265
def complete(
266266
self,
267267
*,
268-
messages: List[_models.ChatRequestMessage],
268+
messages: Union[List[_models.ChatRequestMessage], List[Dict[str, Any]]],
269269
stream: Literal[False] = False,
270270
frequency_penalty: Optional[float] = None,
271271
presence_penalty: Optional[float] = None,
@@ -288,7 +288,7 @@ def complete(
288288
def complete(
289289
self,
290290
*,
291-
messages: List[_models.ChatRequestMessage],
291+
messages: Union[List[_models.ChatRequestMessage], List[Dict[str, Any]]],
292292
stream: Literal[True],
293293
frequency_penalty: Optional[float] = None,
294294
presence_penalty: Optional[float] = None,
@@ -311,7 +311,7 @@ def complete(
311311
def complete(
312312
self,
313313
*,
314-
messages: List[_models.ChatRequestMessage],
314+
messages: Union[List[_models.ChatRequestMessage], List[Dict[str, Any]]],
315315
stream: Optional[bool] = None,
316316
frequency_penalty: Optional[float] = None,
317317
presence_penalty: Optional[float] = None,
@@ -344,7 +344,7 @@ def complete(
344344
Typical usage begins with a chat message for the System role that provides instructions for
345345
the behavior of the assistant, followed by alternating messages between the User and
346346
Assistant roles. Required.
347-
:paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage]
347+
:paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] or list[dict[str, Any]]
348348
:keyword stream: A value indicating whether chat completions should be streamed for this request.
349349
Default value is False. If streaming is enabled, the response will be a StreamingChatCompletions.
350350
Otherwise the response will be a ChatCompletions.
@@ -473,7 +473,7 @@ def complete(
473473
self,
474474
body: Union[JSON, IO[bytes]] = _Unset,
475475
*,
476-
messages: List[_models.ChatRequestMessage] = _Unset,
476+
messages: Union[List[_models.ChatRequestMessage], List[Dict[str, Any]]] = _Unset,
477477
stream: Optional[bool] = None,
478478
frequency_penalty: Optional[float] = None,
479479
presence_penalty: Optional[float] = None,
@@ -507,7 +507,7 @@ def complete(
507507
Typical usage begins with a chat message for the System role that provides instructions for
508508
the behavior of the assistant, followed by alternating messages between the User and
509509
Assistant roles. Required.
510-
:paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage]
510+
:paramtype messages: list[~azure.ai.inference.models.ChatRequestMessage] or list[dict[str, Any]]
511511
:keyword stream: A value indicating whether chat completions should be streamed for this request.
512512
Default value is False. If streaming is enabled, the response will be a StreamingChatCompletions.
513513
Otherwise the response will be a ChatCompletions.

sdk/ai/azure-ai-inference/azure/ai/inference/prompts/_patch.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
from ._core import Prompty
1616
from ._mustache import render
1717
from ._prompty_utils import load, prepare
18+
from ._utils import remove_leading_empty_space
1819

1920

2021
class PromptTemplate:
@@ -105,7 +106,8 @@ def create_messages(self, data: Optional[Dict[str, Any]] = None, **kwargs) -> Li
105106
parsed = prepare(self.prompty, data)
106107
return parsed
107108
elif "prompt_template" in self._config:
108-
system_prompt = render(self._config["prompt_template"], data)
109+
prompt_template = remove_leading_empty_space(self._config["prompt_template"])
110+
system_prompt = render(prompt_template, data)
109111
return [{"role": "system", "content": system_prompt}]
110112
else:
111113
raise ValueError("Please provide valid prompt template")

sdk/ai/azure-ai-inference/azure/ai/inference/prompts/_utils.py

Lines changed: 36 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,10 @@
44
# ------------------------------------
55
# mypy: disable-error-code="import-untyped,return-value"
66
# pylint: disable=line-too-long,R,wrong-import-order,global-variable-not-assigned)
7+
import json
78
import re
9+
import sys
810
import yaml
9-
import json
1011
from typing import Any, Dict, Union
1112
from pathlib import Path
1213

@@ -72,3 +73,37 @@ def parse(contents):
7273
"body": body,
7374
"frontmatter": fmatter,
7475
}
76+
77+
78+
def remove_leading_empty_space(multiline_str: str) -> str:
79+
"""
80+
Processes a multiline string by:
81+
1. Removing empty lines
82+
2. Finding the minimum leading spaces
83+
3. Indenting all lines to the minimum level
84+
85+
:param multiline_str: The input multiline string.
86+
:type multiline_str: str
87+
:return: The processed multiline string.
88+
:rtype: str
89+
"""
90+
lines = multiline_str.splitlines()
91+
start_index = 0
92+
while start_index < len(lines) and lines[start_index].strip() == "":
93+
start_index += 1
94+
95+
# Find the minimum number of leading spaces
96+
min_spaces = sys.maxsize
97+
for line in lines[start_index:]:
98+
if len(line.strip()) == 0:
99+
continue
100+
spaces = len(line) - len(line.lstrip())
101+
spaces += line.lstrip().count("\t") * 2 # Count tabs as 2 spaces
102+
min_spaces = min(min_spaces, spaces)
103+
104+
# Remove leading spaces and indent to the minimum level
105+
processed_lines = []
106+
for line in lines[start_index:]:
107+
processed_lines.append(line[min_spaces:])
108+
109+
return "\n".join(processed_lines)

sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_prompt_string.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@
2323
`your-azure-region` is the Azure region where your model is deployed.
2424
2) AZURE_AI_CHAT_KEY - Your model key (a 32-character string). Keep it secret.
2525
"""
26-
# mypy: disable-error-code="union-attr,arg-type"
2726
# pyright: reportAttributeAccessIssue=false
2827

2928

@@ -73,7 +72,7 @@ def sample_chat_completions_from_input_prompt_string():
7372
messages = prompt_template.create_messages(input=input, rules=rules, chat_history=chat_history)
7473

7574
client = ChatCompletionsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
76-
response = client.complete(messages=messages) # type: ignore[reportCallIssue, reportArgumentType]
75+
response = client.complete(messages=messages)
7776

7877
print(response.choices[0].message.content)
7978

sdk/ai/azure-ai-inference/samples/sample_chat_completions_from_input_prompty.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@
2323
`your-azure-region` is the Azure region where your model is deployed.
2424
2) AZURE_AI_CHAT_KEY - Your model key (a 32-character string). Keep it secret.
2525
"""
26-
# mypy: disable-error-code="union-attr"
2726
# pyright: reportAttributeAccessIssue=false
2827

2928

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
from azure.ai.inference.prompts._utils import remove_leading_empty_space
2+
3+
4+
def test_success_with_no_changes():
5+
prompt_str = """First line
6+
Second line"""
7+
result = remove_leading_empty_space(prompt_str)
8+
assert result == prompt_str
9+
10+
11+
def test_success_by_remove_leading_empty_space():
12+
prompt_str = """
13+
14+
First line
15+
16+
Second line
17+
Third line
18+
"""
19+
result = remove_leading_empty_space(prompt_str)
20+
assert (
21+
result
22+
== """First line
23+
24+
Second line
25+
Third line"""
26+
)

0 commit comments

Comments
 (0)