Skip to content

Commit cb7e4df

Browse files
committed
Merge branch 'lusu/agentserver-core' into lusu/agentserver-langgraph
2 parents be5612d + 76b0fe3 commit cb7e4df

File tree

52 files changed

+3251
-274
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

52 files changed

+3251
-274
lines changed

doc/dev/test_proxy_troubleshooting.md

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -275,6 +275,9 @@ To run recorded tests successfully when recorded values are inconsistent or rand
275275
proxy provides a `variables` API. This makes it possible for a test to record the values of variables that were used
276276
during recording and use the same values in playback mode without a sanitizer.
277277

278+
Note that the recorded variables **must** have string values. For example, trying to record an integer value for a
279+
variable will cause a test proxy error.
280+
278281
For example, imagine that a test uses a randomized `table_uuid` variable when creating resources. The same random value
279282
for `table_uuid` can be used in playback mode by using this `variables` API.
280283

@@ -293,14 +296,14 @@ class TestExample(AzureRecordedTestCase):
293296
@recorded_by_proxy
294297
def test_example(self, **kwargs):
295298
# In live mode, variables is an empty dictionary
296-
# In playback mode, the value of variables is {"table_uuid": "random-value"}
299+
# In playback mode, the value of variables is {"current_time": "<previously recorded time>"}
297300
variables = kwargs.pop("variables", {})
298301

299-
# To fetch variable values, use the `setdefault` method to look for a key ("table_uuid")
300-
# and set a real value for that key if it's not present ("random-value")
301-
table_uuid = variables.setdefault("table_uuid", "random-value")
302+
# To fetch variable values, use the `setdefault` method to look for a key ("current_time")
303+
# and set a real value for that key if it's not present (str(time.time()))
304+
# Note that time.time() is converted from a float to a string to record it properly
305+
current_time = variables.setdefault("current_time", str(time.time()))
302306

303-
# use variables["table_uuid"] when using the table UUID throughout the test
304307
...
305308

306309
# return the variables at the end of the test to record them

eng/pipelines/templates/steps/build-package-artifacts.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,7 @@ steps:
109109
condition: and(succeeded(), or(eq(variables['ENABLE_EXTENSION_BUILD'], 'true'), eq('${{ parameters.ArtifactSuffix }}', 'linux')))
110110
111111
- script: |
112+
sudo dpkg --configure -a
112113
sudo apt-get update
113114
sudo apt-get install -y qemu-user-static binfmt-support
114115
sudo update-binfmts --enable qemu-aarch64

eng/pipelines/templates/steps/install-portaudio.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
steps:
22
- script: |
33
if [[ "$AGENT_OS" == "Linux" ]]; then
4+
sudo dpkg --configure -a
45
sudo apt-get update
56
sudo apt-get install -y portaudio19-dev libasound2-dev
67
elif [[ "$AGENT_OS" == "Darwin" ]]; then

eng/tools/azure-sdk-tools/ci_tools/functions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@
5757

5858
TEST_COMPATIBILITY_MAP = {
5959
"azure-ai-ml": ">=3.7",
60-
"azure-ai-agentserver-core": ">=3.9", # override to allow build with python 3.9
60+
"azure-ai-agentserver-core": ">=3.9", # override to allow build with python 3.9
6161
}
6262
TEST_PYTHON_DISTRO_INCOMPATIBILITY_MAP = {
6363
"azure-storage-blob": "pypy",

eng/tools/azure-sdk-tools/devtools_testutils/proxy_testcase.py

Lines changed: 23 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -114,32 +114,31 @@ def start_record_or_playback(test_id: str) -> "Tuple[str, Dict[str, str]]":
114114

115115

116116
def stop_record_or_playback(test_id: str, recording_id: str, test_variables: "Dict[str, str]") -> None:
117-
try:
118-
http_client = get_http_client()
119-
if is_live():
120-
http_client.request(
121-
method="POST",
122-
url=RECORDING_STOP_URL,
123-
headers={
124-
"x-recording-file": test_id,
125-
"x-recording-id": recording_id,
126-
"x-recording-save": "true",
127-
"Content-Type": "application/json",
128-
},
129-
# tests don't record successfully unless test_variables is a dictionary
130-
body=json.dumps(test_variables).encode("utf-8") if test_variables else "{}",
131-
)
132-
else:
133-
http_client.request(
134-
method="POST",
135-
url=PLAYBACK_STOP_URL,
136-
headers={"x-recording-id": recording_id},
137-
)
138-
except HTTPError as e:
117+
http_client = get_http_client()
118+
if is_live():
119+
response = http_client.request(
120+
method="POST",
121+
url=RECORDING_STOP_URL,
122+
headers={
123+
"x-recording-file": test_id,
124+
"x-recording-id": recording_id,
125+
"x-recording-save": "true",
126+
"Content-Type": "application/json",
127+
},
128+
# tests don't record successfully unless test_variables is a dictionary
129+
body=json.dumps(test_variables or {}).encode("utf-8"),
130+
)
131+
else:
132+
response = http_client.request(
133+
method="POST",
134+
url=PLAYBACK_STOP_URL,
135+
headers={"x-recording-id": recording_id},
136+
)
137+
if response.status >= 400:
139138
raise HttpResponseError(
140139
"The test proxy ran into an error while ending the session. Make sure any test variables you record have "
141-
"string values."
142-
) from e
140+
f"string values. Full error details: {response.data}"
141+
)
143142

144143

145144
def get_proxy_netloc() -> "Dict[str, str]":
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
include *.md
2+
include LICENSE
3+
recursive-include tests *.py
4+
recursive-include samples *.py *.md
5+
recursive-include doc *.rst *.md
6+
include azure/__init__.py
7+
include azure/ai/__init__.py
8+
include azure/ai/agentserver/__init__.py

sdk/ai/azure-ai-agentserver-core/azure/ai/agentserver/core/logger.py

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# pylint: disable=broad-exception-caught
1+
# pylint: disable=broad-exception-caught,dangerous-default-value
22
# ---------------------------------------------------------
33
# Copyright (c) Microsoft Corporation. All rights reserved.
44
# ---------------------------------------------------------
@@ -52,9 +52,8 @@ def get_project_endpoint():
5252
account = parts[0]
5353
project = parts[1]
5454
return f"https://{account}.services.ai.azure.com/api/projects/{project}"
55-
else:
56-
print("environment variable AGENT_PROJECT_RESOURCE_ID not set.")
57-
return None
55+
print("environment variable AGENT_PROJECT_RESOURCE_ID not set.")
56+
return None
5857

5958

6059
def get_application_insights_connstr():
@@ -97,6 +96,9 @@ def configure(log_config: dict = default_log_config):
9796
"""
9897
Configure logging based on the provided configuration dictionary.
9998
The dictionary should contain the logging configuration in a format compatible with `logging.config.dictConfig`.
99+
100+
:param log_config: A dictionary containing logging configuration.
101+
:type log_config: dict
100102
"""
101103
try:
102104
config.dictConfig(log_config)
@@ -106,7 +108,6 @@ def configure(log_config: dict = default_log_config):
106108
os.environ.get(Constants.ENABLE_APPLICATION_INSIGHTS_LOGGER, "true").lower() == "true"
107109
)
108110
if application_insights_connection_string and enable_application_insights_logger:
109-
from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter
110111
from opentelemetry._logs import set_logger_provider
111112
from opentelemetry.sdk._logs import (
112113
LoggerProvider,
@@ -115,6 +116,8 @@ def configure(log_config: dict = default_log_config):
115116
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
116117
from opentelemetry.sdk.resources import Resource
117118

119+
from azure.monitor.opentelemetry.exporter import AzureMonitorLogExporter
120+
118121
logger_provider = LoggerProvider(resource=Resource.create({"service.name": "azure.ai.agentshosting"}))
119122
set_logger_provider(logger_provider)
120123

@@ -135,7 +138,6 @@ def configure(log_config: dict = default_log_config):
135138

136139
except Exception as e:
137140
print(f"Failed to configure logging: {e}")
138-
pass
139141

140142

141143
def get_log_level():
@@ -147,8 +149,11 @@ def get_log_level():
147149
return log_level
148150

149151

150-
def get_logger():
152+
def get_logger() -> logging.Logger:
151153
"""
152154
If the logger is not already configured, it will be initialized with default settings.
155+
156+
:return: Configured logger instance.
157+
:rtype: logging.Logger
153158
"""
154159
return logging.getLogger("azure.ai.agentshosting")
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,16 @@
11
# ---------------------------------------------------------
22
# Copyright (c) Microsoft Corporation. All rights reserved.
33
# ---------------------------------------------------------
4+
"""
5+
Re-exports of OpenAI SDK response types.
6+
7+
This module re-exports types from the OpenAI SDK for convenience.
8+
These types are fully documented in the OpenAI SDK documentation.
9+
10+
.. note::
11+
This module re-exports OpenAI SDK types. For detailed documentation,
12+
please refer to the `OpenAI Python SDK documentation <https://github.com/openai/openai-python>`_.
13+
"""
414
from openai.types.responses import * # pylint: disable=unused-wildcard-import
515

616
__all__ = [name for name in globals() if not name.startswith("_")] # type: ignore[var-annotated]

sdk/ai/azure-ai-agentserver-core/azure/ai/agentserver/core/models/projects/_enums.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -663,12 +663,12 @@ class ScheduleTaskType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
663663
class ServiceTier(str, Enum, metaclass=CaseInsensitiveEnumMeta):
664664
"""Specifies the processing type used for serving the request.
665665
* If set to 'auto', then the request will be processed with the service tier configured in the
666-
Project settings. Unless otherwise configured, the Project will use 'default'.
666+
Project settings. Unless otherwise configured, the Project will use 'default'.
667667
* If set to 'default', then the request will be processed with the standard pricing and
668-
performance for the selected model.
668+
performance for the selected model.
669669
* If set to '[flex](/docs/guides/flex-processing)' or 'priority', then the request will be
670-
processed with the corresponding service tier. [Contact
671-
sales](https://openai.com/contact-sales) to learn more about Priority processing.
670+
processed with the corresponding service tier. [Contact
671+
sales](https://openai.com/contact-sales) to learn more about Priority processing.
672672
* When not set, the default behavior is 'auto'.
673673
When the ``service_tier`` parameter is set, the response body will include the
674674
``service_tier`` value based on the processing mode actually used to serve the request. This

0 commit comments

Comments
 (0)