
Commit 7e14649

Merge branch 'Azure:main' into main
2 parents: 285fd04 + 9f6efdf

753 files changed (+172,559 / -66,497 lines)


.github/CODEOWNERS

Lines changed: 4 additions & 0 deletions
@@ -62,6 +62,8 @@
 
 /src/aks-preview/ @andyzhangx @andyliuliming @fumingzhang
 
+/src/aks-agent/ @nilo19 @mainerd
+
 /src/bastion/ @aavalang
 
 /src/vm-repair/ @haagha
@@ -333,3 +335,5 @@
 /src/amlfs/ @Aman-Jain-14 @amajai @mawhite @brpanask @tibanyas
 
 /src/storage-discovery/ @shanefujs @calvinhzy
+
+/src/aks-agent/ @feiskyer @mainred @nilo19
Lines changed: 95 additions & 0 deletions
@@ -0,0 +1,95 @@
name: Trigger ADO OneBranch Extension Release Pipeline

# Run this workflow every time a commit gets pushed to main
# This triggers the ADO OneBranch Extension Release Pipeline
on:
  push:
    branches:
      - main

permissions:
  contents: read
  id-token: write

jobs:
  build:
    name: Trigger Extension Release Pipeline
    runs-on: ubuntu-latest
    steps:
      - name: Harden Runner
        uses: step-security/harden-runner@63c24ba6bd7ba022e95695ff85de572c04a18142 # v2.7.0
        with:
          egress-policy: audit
      - name: Azure login
        uses: azure/login@v2
        with:
          client-id: ${{ secrets.ADO_SP_ClientID }}
          tenant-id: ${{ secrets.ADO_SP_TenantID }}
          allow-no-subscriptions: true
      - name: Trigger ADO Pipeline and Wait for Completion
        uses: azure/cli@v2
        env:
          ado-org: ${{secrets.ADO_ORGANIZATION}}
          ado-project: ${{secrets.ADO_PROJECT}}
          ado-pipeline-id: 396380
          commit-id: ${{ github.sha }}
        with:
          inlineScript: |
            # Trigger the pipeline and capture the build ID
            echo "Triggering ADO pipeline..."
            BUILD_RESULT=$(az pipelines build queue \
              --definition-id ${{ env.ado-pipeline-id }} \
              --organization ${{ env.ado-org }} \
              --project ${{ env.ado-project }} \
              --variables commit_id=${{ env.commit-id }} \
              --output json)

            BUILD_ID=$(echo $BUILD_RESULT | jq -r '.id')
            echo "Pipeline triggered with Build ID: $BUILD_ID"

            if [ "$BUILD_ID" = "null" ] || [ -z "$BUILD_ID" ]; then
              echo "Failed to get build ID from pipeline trigger"
              exit 1
            fi

            # Wait for the build to complete
            echo "Waiting for build $BUILD_ID to complete..."
            while true; do
              BUILD_JSON=$(az pipelines build show \
                --id $BUILD_ID \
                --organization ${{ env.ado-org }} \
                --project ${{ env.ado-project }} \
                --output json)

              BUILD_STATUS=$(echo "$BUILD_JSON" | jq -r '.status')
              BUILD_RESULT_STATUS=$(echo "$BUILD_JSON" | jq -r '.result // "none"')

              echo "Current status: $BUILD_STATUS, Result: $BUILD_RESULT_STATUS"

              # Check if build is completed
              if [ "$BUILD_STATUS" = "completed" ]; then
                echo "Build completed with result: $BUILD_RESULT_STATUS"

                # Check if the build was successful
                if [ "$BUILD_RESULT_STATUS" = "succeeded" ]; then
                  echo "✅ ADO pipeline build succeeded!"
                  exit 0
                elif [ "$BUILD_RESULT_STATUS" = "partiallySucceeded" ]; then
                  echo "⚠️ ADO pipeline build partially succeeded"
                  exit 1
                else
                  echo "❌ ADO pipeline build failed with result: $BUILD_RESULT_STATUS"
                  exit 1
                fi
              fi

              # Check for other terminal states
              if [ "$BUILD_STATUS" = "cancelling" ] || [ "$BUILD_STATUS" = "cancelled" ]; then
                echo "❌ ADO pipeline build was cancelled"
                exit 1
              fi

              # Wait 30 seconds before checking again
              echo "Build still running... waiting 30 seconds"
              sleep 30
            done
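The inline script above is plain Bash driven through azure/cli@v2. For debugging the trigger outside of GitHub Actions, the same queue-and-poll flow can be reproduced locally; the sketch below is a minimal Python version that drives the same az pipelines commands via subprocess. The organization URL, project name, and commit value are placeholder assumptions, and it presumes the azure-devops CLI extension is installed and az login has already been run.

# Minimal sketch: queue an ADO build and poll it to completion by driving the
# same `az pipelines` commands the workflow uses. The ORG, PROJECT, and COMMIT_ID
# values below are placeholder assumptions; run `az extension add --name azure-devops`
# and `az login` first.
import json
import subprocess
import sys
import time

ORG = "https://dev.azure.com/your-org"   # assumption: replace with the real organization URL
PROJECT = "your-project"                 # assumption: replace with the real project name
DEFINITION_ID = "396380"                 # pipeline definition ID from the workflow
COMMIT_ID = "HEAD"                       # assumption: the commit you want to release


def az(*args):
    """Run an az command and return its parsed JSON output."""
    out = subprocess.run(["az", *args, "--output", "json"],
                         check=True, capture_output=True, text=True)
    return json.loads(out.stdout)


# Queue the build, mirroring `az pipelines build queue` in the inline script.
build = az("pipelines", "build", "queue",
           "--definition-id", DEFINITION_ID,
           "--organization", ORG,
           "--project", PROJECT,
           "--variables", f"commit_id={COMMIT_ID}")
build_id = str(build["id"])
print(f"Pipeline triggered with Build ID: {build_id}")

# Poll every 30 seconds until the build reaches a terminal state,
# mirroring the while-loop in the workflow's inline script.
while True:
    status = az("pipelines", "build", "show",
                "--id", build_id,
                "--organization", ORG,
                "--project", PROJECT)
    state, result = status["status"], status.get("result") or "none"
    print(f"Current status: {state}, Result: {result}")
    if state == "completed":
        sys.exit(0 if result == "succeeded" else 1)
    if state in ("cancelling", "cancelled"):
        sys.exit(1)
    time.sleep(30)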

linter_exclusions.yml

Lines changed: 40 additions & 0 deletions
@@ -3402,3 +3402,43 @@ eventgrid namespace topic update:
     event_retention_in_days:
       rule_exclusions:
         - option_length_too_long
+
+neon postgres endpoint create:
+  rule_exclusions:
+    - missing_command_example
+
+neon postgres neon-role create:
+  rule_exclusions:
+    - missing_command_example
+
+neon postgres neon-database create:
+  rule_exclusions:
+    - missing_command_example
+
+neon postgres get-postgres-version:
+  rule_exclusions:
+    - missing_command_example
+
+neon postgres branch:
+  rule_exclusions:
+    - require_wait_command_if_no_wait
+
+neon postgres endpoint:
+  rule_exclusions:
+    - require_wait_command_if_no_wait
+
+neon postgres neon-database:
+  rule_exclusions:
+    - require_wait_command_if_no_wait
+
+neon postgres neon-role:
+  rule_exclusions:
+    - require_wait_command_if_no_wait
+
+neon postgres organization:
+  rule_exclusions:
+    - require_wait_command_if_no_wait
+
+neon postgres project:
+  rule_exclusions:
+    - require_wait_command_if_no_wait

src/aem/HISTORY.rst

Lines changed: 4 additions & 0 deletions
@@ -2,6 +2,10 @@
 
 Release History
 ===============
+1.0.1
++++++
+* Remove DATA_COSMOS_TABLE and DATA_STORAGE references
+
 1.0.0
 +++++
 * Remove msrestazure dependency

src/aem/azext_aem/custom.py

Lines changed: 16 additions & 13 deletions
@@ -463,7 +463,7 @@ def _verify_old(self, skip_storage_check, wait_time_in_minutes, aem_ext):
             else:
                 logger.warning("\t\tStorage Metrics configuration check for '%s'...", storage_account_name)
                 storage_client = self._get_storage_client(storage_account_name, disk['key'])
-                service_properties = storage_client.get_blob_service_properties()
+                service_properties = storage_client.get_service_properties()
                 storage_cfg_ok = EnhancedMonitoring._check_storage_analytics(service_properties)
                 if storage_cfg_ok:
                     logger.warning('\t\tStorage Metrics configuration check: %s', succ_word)
@@ -731,44 +731,47 @@ def _get_disk_info(self):
         return disks_info
 
     def _get_blob_size(self, storage_account_name, container, blob, key):
-        storage_client = self._get_storage_client(storage_account_name, key)
+        blob_service_client = self._get_storage_client(storage_account_name, key)
+        blob_client = blob_service_client.get_blob_client(container, blob)
         # convert to GB
-        return int(storage_client.get_blob_properties(container, blob).properties.content_length / (1 << 30))
+        return int(blob_client.properties.size / (1 << 30))
 
     def _get_storage_client(self, storage_account_name, key):
-        BlockBlobService = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_STORAGE,
-                                   'blob.blockblobservice#BlockBlobService')
+        BlobServiceClient = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_STORAGE_BLOB,
+                                    '_blob_service_client#BlobServiceClient')
         return get_data_service_client(
             self._cmd.cli_ctx,
-            BlockBlobService,
+            BlobServiceClient,
             storage_account_name,
            key,
            endpoint_suffix=self._cmd.cli_ctx.cloud.suffixes.storage_endpoint)  # pylint: disable=no-member
 
     def _enable_storage_analytics(self, storage_account_name, key):
         storage_client = self._get_storage_client(storage_account_name, key)
-        service_properties = storage_client.get_blob_service_properties()
+        service_properties = storage_client.get_service_properties()
         if not EnhancedMonitoring._check_storage_analytics(service_properties):
-            t_logging, t_retention_policy, t_metrics = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_STORAGE, 'Logging',
-                                                               'RetentionPolicy', 'Metrics', mod='common.models')
+            t_logging, t_retention_policy, t_metrics = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_STORAGE_BLOB,
+                                                               'BlobAnalyticsLogging', 'RetentionPolicy', 'Metrics',
+                                                               mod='_models')
             retention_policy = t_retention_policy(enabled=True, days=13)
             logging = t_logging(delete=True, read=True, write=True, retention_policy=retention_policy)
             minute_metrics = t_metrics(enabled=True, include_apis=True, retention_policy=retention_policy)
             if getattr(service_properties, 'hour_metrics', None):
                 service_properties.hour_metrics.retention_policy.days = 13
-            storage_client.set_blob_service_properties(logging, minute_metrics=minute_metrics,
-                                                        hour_metrics=service_properties.hour_metrics)
+            storage_client.set_service_properties(analytics_logging=logging, minute_metrics=minute_metrics,
+                                                  hour_metrics=service_properties.hour_metrics)
 
     @staticmethod
     def _check_storage_analytics(service_properties):
-        return (service_properties and service_properties.logging and
+        return (service_properties and service_properties.analytics_logging and
                 service_properties.minute_metrics and service_properties.minute_metrics.include_apis and
                 service_properties.minute_metrics.retention_policy.days)
 
     def _check_table_and_content(self, storage_account_name, key, table_name,
                                  filter_string, timeout_in_minutes):
         sleep_period = 15
-        TableService = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_COSMOS_TABLE, 'table#TableService')
+        TableService = get_sdk(self._cmd.cli_ctx, ResourceType.DATA_STORAGE_TABLE,
+                               '_table_service_client#TableServiceClient')
         table_client = get_data_service_client(
             self._cmd.cli_ctx,
             TableService,
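The custom.py changes above move the aem extension from the legacy BlockBlobService/TableService data-plane clients to the track-2 clients resolved through get_sdk with the new ResourceType values. For reference, a minimal standalone sketch of the equivalent track-2 blob calls, with azure-storage-blob imported directly, is shown below; the account name, key, container, and blob names are placeholder assumptions, and the extension itself obtains its client via get_sdk()/get_data_service_client() rather than importing it like this.

# Minimal standalone sketch of the track-2 calls the diff switches to, using
# azure-storage-blob directly. Account, key, container, and blob names are
# placeholder assumptions.
from azure.storage.blob import (BlobAnalyticsLogging, BlobServiceClient,
                                Metrics, RetentionPolicy)

account_name = "mystorageaccount"   # assumption
account_key = "<account-key>"       # assumption

service = BlobServiceClient(
    account_url=f"https://{account_name}.blob.core.windows.net",
    credential=account_key)

# Blob size in GB (the old code read BlockBlobService.get_blob_properties(...).properties.content_length).
blob_client = service.get_blob_client("vhds", "osdisk.vhd")  # assumption: container/blob names
size_gb = int(blob_client.get_blob_properties().size / (1 << 30))

# Storage analytics, in the spirit of the _enable_storage_analytics change:
# 'Logging' from common.models becomes 'BlobAnalyticsLogging', and
# set_blob_service_properties(...) becomes set_service_properties(analytics_logging=...).
retention = RetentionPolicy(enabled=True, days=13)
service.set_service_properties(
    analytics_logging=BlobAnalyticsLogging(delete=True, read=True, write=True,
                                           retention_policy=retention),
    minute_metrics=Metrics(enabled=True, include_apis=True, retention_policy=retention))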
