diff --git a/sdk/resourcemanager/storagecache/armstoragecache/CHANGELOG.md b/sdk/resourcemanager/storagecache/armstoragecache/CHANGELOG.md index be4a1f8fb661..a7dcae0b0e6b 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/CHANGELOG.md +++ b/sdk/resourcemanager/storagecache/armstoragecache/CHANGELOG.md @@ -1,5 +1,28 @@ # Release History +## 4.1.0 (2025-01-21) +### Features Added + +- New enum type `AutoExportJobAdminStatus` with values `AutoExportJobAdminStatusActive`, `AutoExportJobAdminStatusCancel` +- New enum type `AutoExportJobProvisioningStateType` with values `AutoExportJobProvisioningStateTypeCanceled`, `AutoExportJobProvisioningStateTypeCreating`, `AutoExportJobProvisioningStateTypeDeleting`, `AutoExportJobProvisioningStateTypeFailed`, `AutoExportJobProvisioningStateTypeSucceeded`, `AutoExportJobProvisioningStateTypeUpdating` +- New enum type `AutoExportStatusType` with values `AutoExportStatusTypeDisableFailed`, `AutoExportStatusTypeDisabled`, `AutoExportStatusTypeDisabling`, `AutoExportStatusTypeFailed`, `AutoExportStatusTypeInProgress` +- New enum type `ImportJobAdminStatus` with values `ImportJobAdminStatusActive`, `ImportJobAdminStatusCancel` +- New function `NewAutoExportJobsClient(string, azcore.TokenCredential, *arm.ClientOptions) (*AutoExportJobsClient, error)` +- New function `*AutoExportJobsClient.BeginCreateOrUpdate(context.Context, string, string, string, AutoExportJob, *AutoExportJobsClientBeginCreateOrUpdateOptions) (*runtime.Poller[AutoExportJobsClientCreateOrUpdateResponse], error)` +- New function `*AutoExportJobsClient.BeginDelete(context.Context, string, string, string, *AutoExportJobsClientBeginDeleteOptions) (*runtime.Poller[AutoExportJobsClientDeleteResponse], error)` +- New function `*AutoExportJobsClient.Get(context.Context, string, string, string, *AutoExportJobsClientGetOptions) (AutoExportJobsClientGetResponse, error)` +- New function `*AutoExportJobsClient.NewListByAmlFilesystemPager(string, string, *AutoExportJobsClientListByAmlFilesystemOptions) *runtime.Pager[AutoExportJobsClientListByAmlFilesystemResponse]` +- New function `*AutoExportJobsClient.BeginUpdate(context.Context, string, string, string, AutoExportJobUpdate, *AutoExportJobsClientBeginUpdateOptions) (*runtime.Poller[AutoExportJobsClientUpdateResponse], error)` +- New function `*ClientFactory.NewAutoExportJobsClient() *AutoExportJobsClient` +- New struct `AutoExportJob` +- New struct `AutoExportJobProperties` +- New struct `AutoExportJobPropertiesStatus` +- New struct `AutoExportJobUpdate` +- New struct `AutoExportJobsListResult` +- New field `AdminStatus` in struct `ImportJobProperties` +- New field `ImportedDirectories`, `ImportedFiles`, `ImportedSymlinks`, `PreexistingDirectories`, `PreexistingFiles`, `PreexistingSymlinks` in struct `ImportJobPropertiesStatus` + + ## 4.0.0 (2024-05-24) ### Breaking Changes diff --git a/sdk/resourcemanager/storagecache/armstoragecache/amlfilesystems_client.go b/sdk/resourcemanager/storagecache/armstoragecache/amlfilesystems_client.go index 302294dc5c42..a23c6e8b2797 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/amlfilesystems_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/amlfilesystems_client.go @@ -46,7 +46,7 @@ func NewAmlFilesystemsClient(subscriptionID string, credential azcore.TokenCrede // Archive - Archive data from the AML file system. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - options - AmlFilesystemsClientArchiveOptions contains the optional parameters for the AmlFilesystemsClient.Archive method. @@ -91,7 +91,7 @@ func (client *AmlFilesystemsClient) archiveCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.ArchiveInfo != nil { @@ -106,7 +106,7 @@ func (client *AmlFilesystemsClient) archiveCreateRequest(ctx context.Context, re // CancelArchive - Cancel archiving data from the AML file system. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - options - AmlFilesystemsClientCancelArchiveOptions contains the optional parameters for the AmlFilesystemsClient.CancelArchive @@ -152,7 +152,7 @@ func (client *AmlFilesystemsClient) cancelArchiveCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -161,7 +161,7 @@ func (client *AmlFilesystemsClient) cancelArchiveCreateRequest(ctx context.Conte // BeginCreateOrUpdate - Create or update an AML file system. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - amlFilesystem - Object containing the user-selectable properties of the AML file system. If read-only properties are included, @@ -189,7 +189,7 @@ func (client *AmlFilesystemsClient) BeginCreateOrUpdate(ctx context.Context, res // CreateOrUpdate - Create or update an AML file system. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *AmlFilesystemsClient) createOrUpdate(ctx context.Context, resourceGroupName string, amlFilesystemName string, amlFilesystem AmlFilesystem, options *AmlFilesystemsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "AmlFilesystemsClient.BeginCreateOrUpdate" @@ -231,7 +231,7 @@ func (client *AmlFilesystemsClient) createOrUpdateCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, amlFilesystem); err != nil { @@ -243,7 +243,7 @@ func (client *AmlFilesystemsClient) createOrUpdateCreateRequest(ctx context.Cont // BeginDelete - Schedules an AML file system for deletion. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - options - AmlFilesystemsClientBeginDeleteOptions contains the optional parameters for the AmlFilesystemsClient.BeginDelete @@ -269,7 +269,7 @@ func (client *AmlFilesystemsClient) BeginDelete(ctx context.Context, resourceGro // Delete - Schedules an AML file system for deletion. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *AmlFilesystemsClient) deleteOperation(ctx context.Context, resourceGroupName string, amlFilesystemName string, options *AmlFilesystemsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "AmlFilesystemsClient.BeginDelete" @@ -311,7 +311,7 @@ func (client *AmlFilesystemsClient) deleteCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -320,7 +320,7 @@ func (client *AmlFilesystemsClient) deleteCreateRequest(ctx context.Context, res // Get - Returns an AML file system. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - options - AmlFilesystemsClientGetOptions contains the optional parameters for the AmlFilesystemsClient.Get method. 
@@ -366,7 +366,7 @@ func (client *AmlFilesystemsClient) getCreateRequest(ctx context.Context, resour return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -383,7 +383,7 @@ func (client *AmlFilesystemsClient) getHandleResponse(resp *http.Response) (AmlF // NewListPager - Returns all AML file systems the user has access to under a subscription. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - options - AmlFilesystemsClientListOptions contains the optional parameters for the AmlFilesystemsClient.NewListPager method. func (client *AmlFilesystemsClient) NewListPager(options *AmlFilesystemsClientListOptions) *runtime.Pager[AmlFilesystemsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[AmlFilesystemsClientListResponse]{ @@ -420,7 +420,7 @@ func (client *AmlFilesystemsClient) listCreateRequest(ctx context.Context, optio return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -437,7 +437,7 @@ func (client *AmlFilesystemsClient) listHandleResponse(resp *http.Response) (Aml // NewListByResourceGroupPager - Returns all AML file systems the user has access to under a resource group. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - options - AmlFilesystemsClientListByResourceGroupOptions contains the optional parameters for the AmlFilesystemsClient.NewListByResourceGroupPager // method. @@ -480,7 +480,7 @@ func (client *AmlFilesystemsClient) listByResourceGroupCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -498,7 +498,7 @@ func (client *AmlFilesystemsClient) listByResourceGroupHandleResponse(resp *http // BeginUpdate - Update an AML file system instance. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - amlFilesystem - Object containing the user-selectable properties of the AML file system. If read-only properties are included, @@ -526,7 +526,7 @@ func (client *AmlFilesystemsClient) BeginUpdate(ctx context.Context, resourceGro // Update - Update an AML file system instance. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *AmlFilesystemsClient) update(ctx context.Context, resourceGroupName string, amlFilesystemName string, amlFilesystem AmlFilesystemUpdate, options *AmlFilesystemsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "AmlFilesystemsClient.BeginUpdate" @@ -568,7 +568,7 @@ func (client *AmlFilesystemsClient) updateCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, amlFilesystem); err != nil { diff --git a/sdk/resourcemanager/storagecache/armstoragecache/amlfilesystems_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/amlfilesystems_client_example_test.go deleted file mode 100644 index a00f9fe904e3..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/amlfilesystems_client_example_test.go +++ /dev/null @@ -1,802 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armstoragecache_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/amlFilesystems_List.json -func ExampleAmlFilesystemsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewAmlFilesystemsClient().NewListPager(nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.AmlFilesystemsListResult = armstoragecache.AmlFilesystemsListResult{ - // Value: []*armstoragecache.AmlFilesystem{ - // { - // Name: to.Ptr("fs1"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlFilesystems/fs1"), - // Location: to.Ptr("eastus"), - // Tags: map[string]*string{ - // "Dept": to.Ptr("ContosoAds"), - // }, - // Identity: &armstoragecache.AmlFilesystemIdentity{ - // Type: to.Ptr(armstoragecache.AmlFilesystemIdentityTypeUserAssigned), - // UserAssignedIdentities: map[string]*armstoragecache.UserAssignedIdentitiesValue{ - // "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity1": &armstoragecache.UserAssignedIdentitiesValue{ - // }, - // }, - // }, - // Properties: &armstoragecache.AmlFilesystemProperties{ - // ClientInfo: &armstoragecache.AmlFilesystemClientInfo{ - // ContainerStorageInterface: &armstoragecache.AmlFilesystemContainerStorageInterface{ - // PersistentVolume: to.Ptr(""), - // PersistentVolumeClaim: to.Ptr(""), - // StorageClass: to.Ptr(""), - // }, - // LustreVersion: to.Ptr("2.15.0"), - // MgsAddress: to.Ptr("10.0.0.4"), - // MountCommand: to.Ptr("mount -t lustre 10.0.0.4@tcp:/lustrefs /lustre/lustrefs"), - // }, - // EncryptionSettings: &armstoragecache.AmlFilesystemEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://examplekv.vault.azure.net/keys/kvk/3540a47df75541378d3518c6a4bdf5af"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // FilesystemSubnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/fsSub1"), - // Health: &armstoragecache.AmlFilesystemHealth{ - // State: to.Ptr(armstoragecache.AmlFilesystemHealthStateTypeAvailable), - // StatusDescription: to.Ptr("amlFilesystem is ok."), - // }, - // Hsm: &armstoragecache.AmlFilesystemPropertiesHsm{ - // ArchiveStatus: []*armstoragecache.AmlFilesystemArchive{ - // { - // FilesystemPath: to.Ptr("/"), - // Status: &armstoragecache.AmlFilesystemArchiveStatus{ - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ArchiveStatusTypeCompleted), - // }, - // }}, - // Settings: &armstoragecache.AmlFilesystemHsmSettings{ - // Container: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/containername"), - // ImportPrefixesInitial: []*string{ - // to.Ptr("/")}, - // LoggingContainer: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/loggingcontainername"), - // }, - // }, - // MaintenanceWindow: &armstoragecache.AmlFilesystemPropertiesMaintenanceWindow{ - // DayOfWeek: to.Ptr(armstoragecache.MaintenanceDayOfWeekTypeFriday), - // TimeOfDayUTC: to.Ptr("22:00"), - // 
}, - // ProvisioningState: to.Ptr(armstoragecache.AmlFilesystemProvisioningStateTypeSucceeded), - // RootSquashSettings: &armstoragecache.AmlFilesystemRootSquashSettings{ - // Mode: to.Ptr(armstoragecache.AmlFilesystemSquashModeAll), - // NoSquashNidLists: to.Ptr("10.0.0.[5-6]@tcp;10.0.1.2@tcp"), - // SquashGID: to.Ptr[int64](99), - // SquashUID: to.Ptr[int64](99), - // Status: to.Ptr("nodemap.active=1"), - // }, - // StorageCapacityTiB: to.Ptr[float32](16), - // ThroughputProvisionedMBps: to.Ptr[int32](500), - // }, - // SKU: &armstoragecache.SKUName{ - // Name: to.Ptr("AMLFS-Durable-Premium-250"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // }, - // { - // Name: to.Ptr("fs2"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlFilesystems/fs2"), - // Location: to.Ptr("eastus"), - // Tags: map[string]*string{ - // "Dept": to.Ptr("ContosoAds"), - // }, - // Identity: &armstoragecache.AmlFilesystemIdentity{ - // Type: to.Ptr(armstoragecache.AmlFilesystemIdentityTypeUserAssigned), - // UserAssignedIdentities: map[string]*armstoragecache.UserAssignedIdentitiesValue{ - // "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity1": &armstoragecache.UserAssignedIdentitiesValue{ - // }, - // }, - // }, - // Properties: &armstoragecache.AmlFilesystemProperties{ - // ClientInfo: &armstoragecache.AmlFilesystemClientInfo{ - // ContainerStorageInterface: &armstoragecache.AmlFilesystemContainerStorageInterface{ - // PersistentVolume: to.Ptr(""), - // PersistentVolumeClaim: to.Ptr(""), - // StorageClass: to.Ptr(""), - // }, - // LustreVersion: to.Ptr("2.15.0"), - // MgsAddress: to.Ptr("10.0.0.4"), - // MountCommand: to.Ptr("mount -t lustre 10.0.0.4@tcp:/lustrefs /lustre/lustrefs"), - // }, - // EncryptionSettings: &armstoragecache.AmlFilesystemEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://examplekv.vault.azure.net/keys/kvk/3540a47df75541378d3518c6a4bdf5af"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // FilesystemSubnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/fsSub2"), - // Health: &armstoragecache.AmlFilesystemHealth{ - // State: to.Ptr(armstoragecache.AmlFilesystemHealthStateTypeAvailable), - // StatusDescription: to.Ptr("amlFilesystem is ok."), - // }, - // Hsm: &armstoragecache.AmlFilesystemPropertiesHsm{ - // ArchiveStatus: []*armstoragecache.AmlFilesystemArchive{ - // { - // FilesystemPath: to.Ptr("/"), - // Status: &armstoragecache.AmlFilesystemArchiveStatus{ - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ArchiveStatusTypeCompleted), - // }, - // }}, - // Settings: &armstoragecache.AmlFilesystemHsmSettings{ - // Container: 
to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/containername"), - // ImportPrefixesInitial: []*string{ - // to.Ptr("/")}, - // LoggingContainer: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/loggingcontainername"), - // }, - // }, - // MaintenanceWindow: &armstoragecache.AmlFilesystemPropertiesMaintenanceWindow{ - // DayOfWeek: to.Ptr(armstoragecache.MaintenanceDayOfWeekTypeFriday), - // TimeOfDayUTC: to.Ptr("22:00"), - // }, - // ProvisioningState: to.Ptr(armstoragecache.AmlFilesystemProvisioningStateTypeSucceeded), - // RootSquashSettings: &armstoragecache.AmlFilesystemRootSquashSettings{ - // Mode: to.Ptr(armstoragecache.AmlFilesystemSquashModeAll), - // NoSquashNidLists: to.Ptr("10.0.0.[5-6]@tcp;10.0.1.2@tcp"), - // SquashGID: to.Ptr[int64](99), - // SquashUID: to.Ptr[int64](99), - // Status: to.Ptr("nodemap.active=1"), - // }, - // StorageCapacityTiB: to.Ptr[float32](16), - // ThroughputProvisionedMBps: to.Ptr[int32](500), - // }, - // SKU: &armstoragecache.SKUName{ - // Name: to.Ptr("AMLFS-Durable-Premium-250"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/amlFilesystems_ListByResourceGroup.json -func ExampleAmlFilesystemsClient_NewListByResourceGroupPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewAmlFilesystemsClient().NewListByResourceGroupPager("scgroup", nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.AmlFilesystemsListResult = armstoragecache.AmlFilesystemsListResult{ - // Value: []*armstoragecache.AmlFilesystem{ - // { - // Name: to.Ptr("fs1"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlFilesystems/fs1"), - // Location: to.Ptr("eastus"), - // Tags: map[string]*string{ - // "Dept": to.Ptr("ContosoAds"), - // }, - // Identity: &armstoragecache.AmlFilesystemIdentity{ - // Type: to.Ptr(armstoragecache.AmlFilesystemIdentityTypeUserAssigned), - // UserAssignedIdentities: map[string]*armstoragecache.UserAssignedIdentitiesValue{ - // "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity1": &armstoragecache.UserAssignedIdentitiesValue{ - // }, - // }, - // }, - // Properties: &armstoragecache.AmlFilesystemProperties{ - // ClientInfo: &armstoragecache.AmlFilesystemClientInfo{ - // ContainerStorageInterface: &armstoragecache.AmlFilesystemContainerStorageInterface{ - // PersistentVolume: to.Ptr(""), - // PersistentVolumeClaim: to.Ptr(""), - // StorageClass: to.Ptr(""), - // }, - // LustreVersion: to.Ptr("2.15.0"), - // MgsAddress: to.Ptr("10.0.0.4"), - // MountCommand: to.Ptr("mount -t lustre 10.0.0.4@tcp:/lustrefs /lustre/lustrefs"), - // }, - // EncryptionSettings: &armstoragecache.AmlFilesystemEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://examplekv.vault.azure.net/keys/kvk/3540a47df75541378d3518c6a4bdf5af"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // FilesystemSubnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/fsSub1"), - // Health: &armstoragecache.AmlFilesystemHealth{ - // State: to.Ptr(armstoragecache.AmlFilesystemHealthStateTypeAvailable), - // StatusDescription: to.Ptr("amlFilesystem is ok."), - // }, - // Hsm: &armstoragecache.AmlFilesystemPropertiesHsm{ - // ArchiveStatus: []*armstoragecache.AmlFilesystemArchive{ - // { - // FilesystemPath: to.Ptr("/"), - // Status: &armstoragecache.AmlFilesystemArchiveStatus{ - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ArchiveStatusTypeCompleted), - // }, - // }}, - // Settings: &armstoragecache.AmlFilesystemHsmSettings{ - // Container: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/containername"), - // ImportPrefixesInitial: []*string{ - // to.Ptr("/")}, - // LoggingContainer: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/loggingcontainername"), - // }, - // }, - // MaintenanceWindow: &armstoragecache.AmlFilesystemPropertiesMaintenanceWindow{ - // DayOfWeek: to.Ptr(armstoragecache.MaintenanceDayOfWeekTypeFriday), - // TimeOfDayUTC: to.Ptr("22:00"), - // 
}, - // ProvisioningState: to.Ptr(armstoragecache.AmlFilesystemProvisioningStateTypeSucceeded), - // RootSquashSettings: &armstoragecache.AmlFilesystemRootSquashSettings{ - // Mode: to.Ptr(armstoragecache.AmlFilesystemSquashModeAll), - // NoSquashNidLists: to.Ptr("10.0.0.[5-6]@tcp;10.0.1.2@tcp"), - // SquashGID: to.Ptr[int64](99), - // SquashUID: to.Ptr[int64](99), - // Status: to.Ptr("nodemap.active=1"), - // }, - // StorageCapacityTiB: to.Ptr[float32](16), - // ThroughputProvisionedMBps: to.Ptr[int32](500), - // }, - // SKU: &armstoragecache.SKUName{ - // Name: to.Ptr("AMLFS-Durable-Premium-250"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // }, - // { - // Name: to.Ptr("fs2"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlFilesystems/fs2"), - // Location: to.Ptr("eastus"), - // Tags: map[string]*string{ - // "Dept": to.Ptr("ContosoAds"), - // }, - // Identity: &armstoragecache.AmlFilesystemIdentity{ - // Type: to.Ptr(armstoragecache.AmlFilesystemIdentityTypeUserAssigned), - // UserAssignedIdentities: map[string]*armstoragecache.UserAssignedIdentitiesValue{ - // "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity1": &armstoragecache.UserAssignedIdentitiesValue{ - // }, - // }, - // }, - // Properties: &armstoragecache.AmlFilesystemProperties{ - // ClientInfo: &armstoragecache.AmlFilesystemClientInfo{ - // ContainerStorageInterface: &armstoragecache.AmlFilesystemContainerStorageInterface{ - // PersistentVolume: to.Ptr(""), - // PersistentVolumeClaim: to.Ptr(""), - // StorageClass: to.Ptr(""), - // }, - // LustreVersion: to.Ptr("2.15.0"), - // MgsAddress: to.Ptr("10.0.0.4"), - // MountCommand: to.Ptr("mount -t lustre 10.0.0.4@tcp:/lustrefs /lustre/lustrefs"), - // }, - // EncryptionSettings: &armstoragecache.AmlFilesystemEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://examplekv.vault.azure.net/keys/kvk/3540a47df75541378d3518c6a4bdf5af"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // FilesystemSubnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/fsSub2"), - // Health: &armstoragecache.AmlFilesystemHealth{ - // State: to.Ptr(armstoragecache.AmlFilesystemHealthStateTypeAvailable), - // StatusDescription: to.Ptr("amlFilesystem is ok."), - // }, - // Hsm: &armstoragecache.AmlFilesystemPropertiesHsm{ - // ArchiveStatus: []*armstoragecache.AmlFilesystemArchive{ - // { - // FilesystemPath: to.Ptr("/"), - // Status: &armstoragecache.AmlFilesystemArchiveStatus{ - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ArchiveStatusTypeCompleted), - // }, - // }}, - // Settings: &armstoragecache.AmlFilesystemHsmSettings{ - // Container: 
to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/containername"), - // ImportPrefixesInitial: []*string{ - // to.Ptr("/")}, - // LoggingContainer: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/loggingcontainername"), - // }, - // }, - // MaintenanceWindow: &armstoragecache.AmlFilesystemPropertiesMaintenanceWindow{ - // DayOfWeek: to.Ptr(armstoragecache.MaintenanceDayOfWeekTypeFriday), - // TimeOfDayUTC: to.Ptr("22:00"), - // }, - // ProvisioningState: to.Ptr(armstoragecache.AmlFilesystemProvisioningStateTypeSucceeded), - // RootSquashSettings: &armstoragecache.AmlFilesystemRootSquashSettings{ - // Mode: to.Ptr(armstoragecache.AmlFilesystemSquashModeAll), - // NoSquashNidLists: to.Ptr("10.0.0.[5-6]@tcp;10.0.1.2@tcp"), - // SquashGID: to.Ptr[int64](99), - // SquashUID: to.Ptr[int64](99), - // Status: to.Ptr("nodemap.active=1"), - // }, - // StorageCapacityTiB: to.Ptr[float32](16), - // ThroughputProvisionedMBps: to.Ptr[int32](500), - // }, - // SKU: &armstoragecache.SKUName{ - // Name: to.Ptr("AMLFS-Durable-Premium-250"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/amlFilesystems_Delete.json -func ExampleAmlFilesystemsClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewAmlFilesystemsClient().BeginDelete(ctx, "scgroup", "fs1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/amlFilesystems_Get.json -func ExampleAmlFilesystemsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewAmlFilesystemsClient().Get(ctx, "scgroup", "fs1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.AmlFilesystem = armstoragecache.AmlFilesystem{ - // Name: to.Ptr("fs1"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlFilesystems/fs1"), - // Location: to.Ptr("eastus"), - // Tags: map[string]*string{ - // "Dept": to.Ptr("ContosoAds"), - // }, - // Identity: &armstoragecache.AmlFilesystemIdentity{ - // Type: to.Ptr(armstoragecache.AmlFilesystemIdentityTypeUserAssigned), - // UserAssignedIdentities: map[string]*armstoragecache.UserAssignedIdentitiesValue{ - // "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity1": &armstoragecache.UserAssignedIdentitiesValue{ - // }, - // }, - // }, - // Properties: &armstoragecache.AmlFilesystemProperties{ - // ClientInfo: &armstoragecache.AmlFilesystemClientInfo{ - // ContainerStorageInterface: &armstoragecache.AmlFilesystemContainerStorageInterface{ - // PersistentVolume: to.Ptr(""), - // PersistentVolumeClaim: to.Ptr(""), - // StorageClass: to.Ptr(""), - // }, - // LustreVersion: to.Ptr("2.15.0"), - // MgsAddress: to.Ptr("10.0.0.4"), - // MountCommand: to.Ptr("mount -t lustre 10.0.0.4@tcp:/lustrefs /lustre/lustrefs"), - // }, - // EncryptionSettings: &armstoragecache.AmlFilesystemEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://keyvault-cmk.vault.azure.net/keys/key2048/test"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // FilesystemSubnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/fsSub"), - // Health: &armstoragecache.AmlFilesystemHealth{ - // State: to.Ptr(armstoragecache.AmlFilesystemHealthStateTypeAvailable), - // StatusDescription: to.Ptr("amlFilesystem is ok."), - // }, - // Hsm: &armstoragecache.AmlFilesystemPropertiesHsm{ - // ArchiveStatus: []*armstoragecache.AmlFilesystemArchive{ - // { - // FilesystemPath: to.Ptr("/"), - // Status: &armstoragecache.AmlFilesystemArchiveStatus{ - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ArchiveStatusTypeCompleted), - // }, - // }}, - // Settings: &armstoragecache.AmlFilesystemHsmSettings{ - // Container: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/containername"), - // ImportPrefixesInitial: []*string{ - // to.Ptr("/")}, - // LoggingContainer: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/loggingcontainername"), - // }, - // }, - // MaintenanceWindow: &armstoragecache.AmlFilesystemPropertiesMaintenanceWindow{ - // DayOfWeek: to.Ptr(armstoragecache.MaintenanceDayOfWeekTypeFriday), - // TimeOfDayUTC: to.Ptr("22:00"), - // }, - // ProvisioningState: to.Ptr(armstoragecache.AmlFilesystemProvisioningStateTypeSucceeded), - 
// RootSquashSettings: &armstoragecache.AmlFilesystemRootSquashSettings{ - // Mode: to.Ptr(armstoragecache.AmlFilesystemSquashModeAll), - // NoSquashNidLists: to.Ptr("10.0.0.[5-6]@tcp;10.0.1.2@tcp"), - // SquashGID: to.Ptr[int64](99), - // SquashUID: to.Ptr[int64](99), - // Status: to.Ptr("nodemap.active=1"), - // }, - // StorageCapacityTiB: to.Ptr[float32](16), - // ThroughputProvisionedMBps: to.Ptr[int32](500), - // }, - // SKU: &armstoragecache.SKUName{ - // Name: to.Ptr("AMLFS-Durable-Premium-250"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/amlFilesystems_CreateOrUpdate.json -func ExampleAmlFilesystemsClient_BeginCreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewAmlFilesystemsClient().BeginCreateOrUpdate(ctx, "scgroup", "fs1", armstoragecache.AmlFilesystem{ - Location: to.Ptr("eastus"), - Tags: map[string]*string{ - "Dept": to.Ptr("ContosoAds"), - }, - Identity: &armstoragecache.AmlFilesystemIdentity{ - Type: to.Ptr(armstoragecache.AmlFilesystemIdentityTypeUserAssigned), - UserAssignedIdentities: map[string]*armstoragecache.UserAssignedIdentitiesValue{ - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity1": {}, - }, - }, - Properties: &armstoragecache.AmlFilesystemProperties{ - EncryptionSettings: &armstoragecache.AmlFilesystemEncryptionSettings{ - KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - KeyURL: to.Ptr("https://examplekv.vault.azure.net/keys/kvk/3540a47df75541378d3518c6a4bdf5af"), - SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - }, - }, - }, - FilesystemSubnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/fsSub"), - Hsm: &armstoragecache.AmlFilesystemPropertiesHsm{ - Settings: &armstoragecache.AmlFilesystemHsmSettings{ - Container: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/containername"), - ImportPrefixesInitial: []*string{ - to.Ptr("/")}, - LoggingContainer: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/loggingcontainername"), - }, - }, - MaintenanceWindow: &armstoragecache.AmlFilesystemPropertiesMaintenanceWindow{ - DayOfWeek: to.Ptr(armstoragecache.MaintenanceDayOfWeekTypeFriday), - TimeOfDayUTC: to.Ptr("22:00"), - }, - RootSquashSettings: &armstoragecache.AmlFilesystemRootSquashSettings{ - Mode: to.Ptr(armstoragecache.AmlFilesystemSquashModeAll), - NoSquashNidLists: to.Ptr("10.0.0.[5-6]@tcp;10.0.1.2@tcp"), - SquashGID: to.Ptr[int64](99), - SquashUID: to.Ptr[int64](99), - }, - StorageCapacityTiB: 
to.Ptr[float32](16), - }, - SKU: &armstoragecache.SKUName{ - Name: to.Ptr("AMLFS-Durable-Premium-250"), - }, - Zones: []*string{ - to.Ptr("1")}, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.AmlFilesystem = armstoragecache.AmlFilesystem{ - // Name: to.Ptr("fs1"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlFilesystems/fs1"), - // Location: to.Ptr("eastus"), - // Tags: map[string]*string{ - // "Dept": to.Ptr("ContosoAds"), - // }, - // Identity: &armstoragecache.AmlFilesystemIdentity{ - // Type: to.Ptr(armstoragecache.AmlFilesystemIdentityTypeUserAssigned), - // UserAssignedIdentities: map[string]*armstoragecache.UserAssignedIdentitiesValue{ - // "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity1": &armstoragecache.UserAssignedIdentitiesValue{ - // }, - // }, - // }, - // Properties: &armstoragecache.AmlFilesystemProperties{ - // ClientInfo: &armstoragecache.AmlFilesystemClientInfo{ - // ContainerStorageInterface: &armstoragecache.AmlFilesystemContainerStorageInterface{ - // PersistentVolume: to.Ptr(""), - // PersistentVolumeClaim: to.Ptr(""), - // StorageClass: to.Ptr(""), - // }, - // LustreVersion: to.Ptr("2.15.0"), - // MgsAddress: to.Ptr("10.0.0.4"), - // MountCommand: to.Ptr("mount -t lustre 10.0.0.4@tcp:/lustrefs /lustre/lustrefs"), - // }, - // FilesystemSubnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/fsSub"), - // Health: &armstoragecache.AmlFilesystemHealth{ - // State: to.Ptr(armstoragecache.AmlFilesystemHealthStateTypeAvailable), - // StatusDescription: to.Ptr("amlFilesystem is ok."), - // }, - // Hsm: &armstoragecache.AmlFilesystemPropertiesHsm{ - // ArchiveStatus: []*armstoragecache.AmlFilesystemArchive{ - // { - // FilesystemPath: to.Ptr("/"), - // Status: &armstoragecache.AmlFilesystemArchiveStatus{ - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ArchiveStatusTypeCompleted), - // }, - // }}, - // Settings: &armstoragecache.AmlFilesystemHsmSettings{ - // Container: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/containername"), - // ImportPrefixesInitial: []*string{ - // to.Ptr("/")}, - // LoggingContainer: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/loggingcontainername"), - // }, - // }, - // MaintenanceWindow: &armstoragecache.AmlFilesystemPropertiesMaintenanceWindow{ - // 
DayOfWeek: to.Ptr(armstoragecache.MaintenanceDayOfWeekTypeFriday), - // TimeOfDayUTC: to.Ptr("22:00"), - // }, - // ProvisioningState: to.Ptr(armstoragecache.AmlFilesystemProvisioningStateTypeSucceeded), - // RootSquashSettings: &armstoragecache.AmlFilesystemRootSquashSettings{ - // Mode: to.Ptr(armstoragecache.AmlFilesystemSquashModeAll), - // NoSquashNidLists: to.Ptr("10.0.0.[5-6]@tcp;10.0.1.2@tcp"), - // SquashGID: to.Ptr[int64](99), - // SquashUID: to.Ptr[int64](99), - // Status: to.Ptr("nodemap.active=1"), - // }, - // StorageCapacityTiB: to.Ptr[float32](16), - // ThroughputProvisionedMBps: to.Ptr[int32](500), - // }, - // SKU: &armstoragecache.SKUName{ - // Name: to.Ptr("AMLFS-Durable-Premium-250"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/amlFilesystems_Update.json -func ExampleAmlFilesystemsClient_BeginUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewAmlFilesystemsClient().BeginUpdate(ctx, "scgroup", "fs1", armstoragecache.AmlFilesystemUpdate{ - Properties: &armstoragecache.AmlFilesystemUpdateProperties{ - EncryptionSettings: &armstoragecache.AmlFilesystemEncryptionSettings{ - KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - KeyURL: to.Ptr("https://examplekv.vault.azure.net/keys/kvk/3540a47df75541378d3518c6a4bdf5af"), - SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - }, - }, - }, - MaintenanceWindow: &armstoragecache.AmlFilesystemUpdatePropertiesMaintenanceWindow{ - DayOfWeek: to.Ptr(armstoragecache.MaintenanceDayOfWeekTypeFriday), - TimeOfDayUTC: to.Ptr("22:00"), - }, - RootSquashSettings: &armstoragecache.AmlFilesystemRootSquashSettings{ - Mode: to.Ptr(armstoragecache.AmlFilesystemSquashModeAll), - NoSquashNidLists: to.Ptr("10.0.0.[5-6]@tcp;10.0.1.2@tcp"), - SquashGID: to.Ptr[int64](99), - SquashUID: to.Ptr[int64](99), - }, - }, - Tags: map[string]*string{ - "Dept": to.Ptr("ContosoAds"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.AmlFilesystem = armstoragecache.AmlFilesystem{ - // Name: to.Ptr("fs1"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlFilesystems/fs1"), - // Location: to.Ptr("eastus"), - // Tags: map[string]*string{ - // "Dept": to.Ptr("ContosoAds"), - // }, - // Identity: &armstoragecache.AmlFilesystemIdentity{ - // Type: to.Ptr(armstoragecache.AmlFilesystemIdentityTypeUserAssigned), - // UserAssignedIdentities: map[string]*armstoragecache.UserAssignedIdentitiesValue{ - // "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity1": &armstoragecache.UserAssignedIdentitiesValue{ - // }, - // }, - // }, - // Properties: &armstoragecache.AmlFilesystemProperties{ - // ClientInfo: &armstoragecache.AmlFilesystemClientInfo{ - // ContainerStorageInterface: &armstoragecache.AmlFilesystemContainerStorageInterface{ - // PersistentVolume: to.Ptr(""), - // PersistentVolumeClaim: to.Ptr(""), - // StorageClass: to.Ptr(""), - // }, - // LustreVersion: to.Ptr("2.15.0"), - // MgsAddress: to.Ptr("10.0.0.4"), - // MountCommand: to.Ptr("mount -t lustre 10.0.0.4@tcp:/lustrefs /lustre/lustrefs"), - // }, - // EncryptionSettings: &armstoragecache.AmlFilesystemEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://examplekv.vault.azure.net/keys/kvk/3540a47df75541378d3518c6a4bdf5af"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // FilesystemSubnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/fsSub"), - // Health: &armstoragecache.AmlFilesystemHealth{ - // State: to.Ptr(armstoragecache.AmlFilesystemHealthStateTypeAvailable), - // StatusDescription: to.Ptr("amlFilesystem is ok."), - // }, - // Hsm: &armstoragecache.AmlFilesystemPropertiesHsm{ - // ArchiveStatus: []*armstoragecache.AmlFilesystemArchive{ - // { - // FilesystemPath: to.Ptr("/"), - // Status: &armstoragecache.AmlFilesystemArchiveStatus{ - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ArchiveStatusTypeCompleted), - // }, - // }}, - // Settings: &armstoragecache.AmlFilesystemHsmSettings{ - // Container: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/containername"), - // ImportPrefixesInitial: []*string{ - // to.Ptr("/")}, - // LoggingContainer: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/storageaccountname/blobServices/default/containers/loggingcontainername"), - // }, - // }, - // MaintenanceWindow: &armstoragecache.AmlFilesystemPropertiesMaintenanceWindow{ - // DayOfWeek: to.Ptr(armstoragecache.MaintenanceDayOfWeekTypeFriday), - // TimeOfDayUTC: to.Ptr("22:00"), - // }, - // ProvisioningState: 
to.Ptr(armstoragecache.AmlFilesystemProvisioningStateTypeSucceeded), - // RootSquashSettings: &armstoragecache.AmlFilesystemRootSquashSettings{ - // Mode: to.Ptr(armstoragecache.AmlFilesystemSquashModeAll), - // NoSquashNidLists: to.Ptr("10.0.0.[5-6]@tcp;10.0.1.2@tcp"), - // SquashGID: to.Ptr[int64](99), - // SquashUID: to.Ptr[int64](99), - // Status: to.Ptr("nodemap.active=1"), - // }, - // StorageCapacityTiB: to.Ptr[float32](16), - // ThroughputProvisionedMBps: to.Ptr[int32](500), - // }, - // SKU: &armstoragecache.SKUName{ - // Name: to.Ptr("AMLFS-Durable-Premium-250"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/amlFilesystems_Archive.json -func ExampleAmlFilesystemsClient_Archive() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewAmlFilesystemsClient().Archive(ctx, "scgroup", "sc", &armstoragecache.AmlFilesystemsClientArchiveOptions{ArchiveInfo: &armstoragecache.AmlFilesystemArchiveInfo{ - FilesystemPath: to.Ptr("/"), - }, - }) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/amlFilesystems_CancelArchive.json -func ExampleAmlFilesystemsClient_CancelArchive() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewAmlFilesystemsClient().CancelArchive(ctx, "scgroup", "sc", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/ascoperations_client.go b/sdk/resourcemanager/storagecache/armstoragecache/ascoperations_client.go index 06399f588155..080ec39a5d1b 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/ascoperations_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/ascoperations_client.go @@ -46,7 +46,7 @@ func NewAscOperationsClient(subscriptionID string, credential azcore.TokenCreden // Get - Gets the status of an asynchronous operation for the Azure HPC Cache // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - location - The name of Azure region. // - operationID - The ID of an ongoing async operation. // - options - AscOperationsClientGetOptions contains the optional parameters for the AscOperationsClient.Get method. 
@@ -92,7 +92,7 @@ func (client *AscOperationsClient) getCreateRequest(ctx context.Context, locatio return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/storagecache/armstoragecache/ascoperations_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/ascoperations_client_example_test.go deleted file mode 100644 index 0bba3cf78be0..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/ascoperations_client_example_test.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armstoragecache_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/AscOperations_Get.json -func ExampleAscOperationsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewAscOperationsClient().Get(ctx, "westus", "testoperationid", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.AscOperation = armstoragecache.AscOperation{ - // Name: to.Ptr("testoperationid"), - // EndTime: to.Ptr("2023-01-01T16:13:13.933Z"), - // ID: to.Ptr("/subscriptions/id/locations/westus/ascOperations/testoperationid"), - // StartTime: to.Ptr("2023-01-01T13:13:13.933Z"), - // Status: to.Ptr("Succeeded"), - // } -} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/ascusages_client.go b/sdk/resourcemanager/storagecache/armstoragecache/ascusages_client.go index 20c08de13819..47e9eeaa121a 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/ascusages_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/ascusages_client.go @@ -45,7 +45,7 @@ func NewAscUsagesClient(subscriptionID string, credential azcore.TokenCredential // NewListPager - Gets the quantity used and quota limit for resources // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - location - The name of the region to query for usage information. // - options - AscUsagesClientListOptions contains the optional parameters for the AscUsagesClient.NewListPager method. 
func (client *AscUsagesClient) NewListPager(location string, options *AscUsagesClientListOptions) *runtime.Pager[AscUsagesClientListResponse] { @@ -87,7 +87,7 @@ func (client *AscUsagesClient) listCreateRequest(ctx context.Context, location s return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/storagecache/armstoragecache/ascusages_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/ascusages_client_example_test.go deleted file mode 100644 index c0e847d3e343..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/ascusages_client_example_test.go +++ /dev/null @@ -1,64 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armstoragecache_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/AscResourceUsages_Get.json -func ExampleAscUsagesClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewAscUsagesClient().NewListPager("eastus", nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.ResourceUsagesListResult = armstoragecache.ResourceUsagesListResult{ - // Value: []*armstoragecache.ResourceUsage{ - // { - // Name: &armstoragecache.ResourceUsageName{ - // LocalizedValue: to.Ptr("Cache"), - // Value: to.Ptr("Cache"), - // }, - // CurrentValue: to.Ptr[int32](1), - // Limit: to.Ptr[int32](4), - // Unit: to.Ptr("Count"), - // }, - // { - // Name: &armstoragecache.ResourceUsageName{ - // LocalizedValue: to.Ptr("AmlFilesystem"), - // Value: to.Ptr("AmlFilesystem"), - // }, - // CurrentValue: to.Ptr[int32](0), - // Limit: to.Ptr[int32](4), - // Unit: to.Ptr("Count"), - // }}, - // } - } -} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/autoexportjobs_client.go b/sdk/resourcemanager/storagecache/armstoragecache/autoexportjobs_client.go new file mode 100644 index 000000000000..782cc3f61429 --- /dev/null +++ b/sdk/resourcemanager/storagecache/armstoragecache/autoexportjobs_client.go @@ -0,0 +1,434 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armstoragecache + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// AutoExportJobsClient contains the methods for the AutoExportJobs group. +// Don't use this type directly, use NewAutoExportJobsClient() instead. +type AutoExportJobsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewAutoExportJobsClient creates a new instance of AutoExportJobsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewAutoExportJobsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AutoExportJobsClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &AutoExportJobsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update an auto export job. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-07-01 +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. +// - autoExportJobName - Name for the auto export job. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. +// - autoExportJob - Object containing the user-selectable properties of the auto export job. If read-only properties are included, +// they must match the existing values of those properties. +// - options - AutoExportJobsClientBeginCreateOrUpdateOptions contains the optional parameters for the AutoExportJobsClient.BeginCreateOrUpdate +// method. 
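A minimal sketch of driving this new long-running create operation follows. The resource group, file system, and job names are placeholders, and the empty `AutoExportJob` payload is only illustrative; a real request would populate its fields per the model.

```go
package armstoragecache_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4"
)

// Sketch only: create or update an auto export job and block until the
// long-running operation completes. Resource names are placeholders, and the
// AutoExportJob payload is left empty; populate Location and Properties per
// the AutoExportJob model for a real request.
func ExampleAutoExportJobsClient_BeginCreateOrUpdate_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := armstoragecache.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	poller, err := clientFactory.NewAutoExportJobsClient().BeginCreateOrUpdate(ctx, "scgroup", "fs1", "job1", armstoragecache.AutoExportJob{}, nil)
	if err != nil {
		log.Fatalf("failed to start the request: %v", err)
	}
	// The returned poller exposes the usual LRO helpers; PollUntilDone waits
	// for a terminal state and returns the final response.
	res, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		log.Fatalf("failed to poll the result: %v", err)
	}
	_ = res
}
```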
+func (client *AutoExportJobsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, autoExportJob AutoExportJob, options *AutoExportJobsClientBeginCreateOrUpdateOptions) (*runtime.Poller[AutoExportJobsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, amlFilesystemName, autoExportJobName, autoExportJob, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[AutoExportJobsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[AutoExportJobsClientCreateOrUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// CreateOrUpdate - Create or update an auto export job. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-07-01 +func (client *AutoExportJobsClient) createOrUpdate(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, autoExportJob AutoExportJob, options *AutoExportJobsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + var err error + const operationName = "AutoExportJobsClient.BeginCreateOrUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, amlFilesystemName, autoExportJobName, autoExportJob, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *AutoExportJobsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, autoExportJob AutoExportJob, options *AutoExportJobsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageCache/amlFilesystems/{amlFilesystemName}/autoExportJobs/{autoExportJobName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if amlFilesystemName == "" { + return nil, errors.New("parameter amlFilesystemName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{amlFilesystemName}", url.PathEscape(amlFilesystemName)) + if autoExportJobName == "" { + return nil, errors.New("parameter autoExportJobName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{autoExportJobName}", url.PathEscape(autoExportJobName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2024-07-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, autoExportJob); err != nil { + return nil, err + } + return req, nil +} + +// BeginDelete - Schedules an auto export job for deletion. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-07-01 +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. +// - autoExportJobName - Name for the auto export job. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. +// - options - AutoExportJobsClientBeginDeleteOptions contains the optional parameters for the AutoExportJobsClient.BeginDelete +// method. +func (client *AutoExportJobsClient) BeginDelete(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, options *AutoExportJobsClientBeginDeleteOptions) (*runtime.Poller[AutoExportJobsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, amlFilesystemName, autoExportJobName, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[AutoExportJobsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[AutoExportJobsClientDeleteResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// Delete - Schedules an auto export job for deletion. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2024-07-01 +func (client *AutoExportJobsClient) deleteOperation(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, options *AutoExportJobsClientBeginDeleteOptions) (*http.Response, error) { + var err error + const operationName = "AutoExportJobsClient.BeginDelete" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.deleteCreateRequest(ctx, resourceGroupName, amlFilesystemName, autoExportJobName, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusAccepted, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *AutoExportJobsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, options *AutoExportJobsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageCache/amlFilesystems/{amlFilesystemName}/autoExportJobs/{autoExportJobName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if amlFilesystemName == "" { + return nil, errors.New("parameter amlFilesystemName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{amlFilesystemName}", url.PathEscape(amlFilesystemName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if autoExportJobName == "" { + return nil, errors.New("parameter autoExportJobName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{autoExportJobName}", url.PathEscape(autoExportJobName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2024-07-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Returns an auto export job. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-07-01 +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. +// - autoExportJobName - Name for the auto export job. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. +// - options - AutoExportJobsClientGetOptions contains the optional parameters for the AutoExportJobsClient.Get method. 
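A minimal sketch of reading a job back with `Get` follows; all resource names are placeholders.

```go
package armstoragecache_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4"
)

// Sketch only: read back a single auto export job. Resource names are placeholders.
func ExampleAutoExportJobsClient_Get_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	clientFactory, err := armstoragecache.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	res, err := clientFactory.NewAutoExportJobsClient().Get(context.Background(), "scgroup", "fs1", "job1", nil)
	if err != nil {
		log.Fatalf("failed to finish the request: %v", err)
	}
	// The response embeds the AutoExportJob model unmarshalled from the service.
	_ = res.AutoExportJob
}
```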
+func (client *AutoExportJobsClient) Get(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, options *AutoExportJobsClientGetOptions) (AutoExportJobsClientGetResponse, error) { + var err error + const operationName = "AutoExportJobsClient.Get" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.getCreateRequest(ctx, resourceGroupName, amlFilesystemName, autoExportJobName, options) + if err != nil { + return AutoExportJobsClientGetResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return AutoExportJobsClientGetResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return AutoExportJobsClientGetResponse{}, err + } + resp, err := client.getHandleResponse(httpResp) + return resp, err +} + +// getCreateRequest creates the Get request. +func (client *AutoExportJobsClient) getCreateRequest(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, options *AutoExportJobsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageCache/amlFilesystems/{amlFilesystemName}/autoExportJobs/{autoExportJobName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if amlFilesystemName == "" { + return nil, errors.New("parameter amlFilesystemName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{amlFilesystemName}", url.PathEscape(amlFilesystemName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if autoExportJobName == "" { + return nil, errors.New("parameter autoExportJobName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{autoExportJobName}", url.PathEscape(autoExportJobName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2024-07-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *AutoExportJobsClient) getHandleResponse(resp *http.Response) (AutoExportJobsClientGetResponse, error) { + result := AutoExportJobsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AutoExportJob); err != nil { + return AutoExportJobsClientGetResponse{}, err + } + return result, nil +} + +// NewListByAmlFilesystemPager - Returns all the auto export jobs the user has access to under an AML File System. +// +// Generated from API version 2024-07-01 +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. 
+// - options - AutoExportJobsClientListByAmlFilesystemOptions contains the optional parameters for the AutoExportJobsClient.NewListByAmlFilesystemPager +// method. +func (client *AutoExportJobsClient) NewListByAmlFilesystemPager(resourceGroupName string, amlFilesystemName string, options *AutoExportJobsClientListByAmlFilesystemOptions) *runtime.Pager[AutoExportJobsClientListByAmlFilesystemResponse] { + return runtime.NewPager(runtime.PagingHandler[AutoExportJobsClientListByAmlFilesystemResponse]{ + More: func(page AutoExportJobsClientListByAmlFilesystemResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *AutoExportJobsClientListByAmlFilesystemResponse) (AutoExportJobsClientListByAmlFilesystemResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "AutoExportJobsClient.NewListByAmlFilesystemPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listByAmlFilesystemCreateRequest(ctx, resourceGroupName, amlFilesystemName, options) + }, nil) + if err != nil { + return AutoExportJobsClientListByAmlFilesystemResponse{}, err + } + return client.listByAmlFilesystemHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listByAmlFilesystemCreateRequest creates the ListByAmlFilesystem request. +func (client *AutoExportJobsClient) listByAmlFilesystemCreateRequest(ctx context.Context, resourceGroupName string, amlFilesystemName string, options *AutoExportJobsClientListByAmlFilesystemOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageCache/amlFilesystems/{amlFilesystemName}/autoExportJobs" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if amlFilesystemName == "" { + return nil, errors.New("parameter amlFilesystemName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{amlFilesystemName}", url.PathEscape(amlFilesystemName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2024-07-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByAmlFilesystemHandleResponse handles the ListByAmlFilesystem response. +func (client *AutoExportJobsClient) listByAmlFilesystemHandleResponse(resp *http.Response) (AutoExportJobsClientListByAmlFilesystemResponse, error) { + result := AutoExportJobsClientListByAmlFilesystemResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AutoExportJobsListResult); err != nil { + return AutoExportJobsClientListByAmlFilesystemResponse{}, err + } + return result, nil +} + +// BeginUpdate - Update an auto export job instance. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-07-01 +// - resourceGroupName - The name of the resource group. 
The name is case insensitive. +// - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. +// - autoExportJobName - Name for the auto export job. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. +// - autoExportJob - Object containing the user-selectable properties of the auto export job. If read-only properties are included, +// they must match the existing values of those properties. +// - options - AutoExportJobsClientBeginUpdateOptions contains the optional parameters for the AutoExportJobsClient.BeginUpdate +// method. +func (client *AutoExportJobsClient) BeginUpdate(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, autoExportJob AutoExportJobUpdate, options *AutoExportJobsClientBeginUpdateOptions) (*runtime.Poller[AutoExportJobsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, amlFilesystemName, autoExportJobName, autoExportJob, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[AutoExportJobsClientUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[AutoExportJobsClientUpdateResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// Update - Update an auto export job instance. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2024-07-01 +func (client *AutoExportJobsClient) update(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, autoExportJob AutoExportJobUpdate, options *AutoExportJobsClientBeginUpdateOptions) (*http.Response, error) { + var err error + const operationName = "AutoExportJobsClient.BeginUpdate" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.updateCreateRequest(ctx, resourceGroupName, amlFilesystemName, autoExportJobName, autoExportJob, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// updateCreateRequest creates the Update request. 
+func (client *AutoExportJobsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, autoExportJob AutoExportJobUpdate, options *AutoExportJobsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageCache/amlFilesystems/{amlFilesystemName}/autoExportJobs/{autoExportJobName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if amlFilesystemName == "" { + return nil, errors.New("parameter amlFilesystemName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{amlFilesystemName}", url.PathEscape(amlFilesystemName)) + if autoExportJobName == "" { + return nil, errors.New("parameter autoExportJobName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{autoExportJobName}", url.PathEscape(autoExportJobName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2024-07-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, autoExportJob); err != nil { + return nil, err + } + return req, nil +} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/autorest.md b/sdk/resourcemanager/storagecache/armstoragecache/autorest.md index c022bb7965f2..455ab84e23b8 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/autorest.md +++ b/sdk/resourcemanager/storagecache/armstoragecache/autorest.md @@ -5,9 +5,8 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/readme.go.md +- /mnt/vss/_work/1/s/azure-rest-api-specs/specification/storagecache/resource-manager/readme.md +- /mnt/vss/_work/1/s/azure-rest-api-specs/specification/storagecache/resource-manager/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 4.0.0 -tag: package-2024-03 +module-version: 4.1.0 ``` \ No newline at end of file diff --git a/sdk/resourcemanager/storagecache/armstoragecache/caches_client.go b/sdk/resourcemanager/storagecache/armstoragecache/caches_client.go index 225e46712d25..44be18fa0f8f 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/caches_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/caches_client.go @@ -46,7 +46,7 @@ func NewCachesClient(subscriptionID string, credential azcore.TokenCredential, o // BeginCreateOrUpdate - Create or update a cache. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. 
Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - cache - Object containing the user-selectable properties of the new cache. If read-only properties are included, they must @@ -73,7 +73,7 @@ func (client *CachesClient) BeginCreateOrUpdate(ctx context.Context, resourceGro // CreateOrUpdate - Create or update a cache. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) createOrUpdate(ctx context.Context, resourceGroupName string, cacheName string, cache Cache, options *CachesClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginCreateOrUpdate" @@ -115,7 +115,7 @@ func (client *CachesClient) createOrUpdateCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, cache); err != nil { @@ -127,7 +127,7 @@ func (client *CachesClient) createOrUpdateCreateRequest(ctx context.Context, res // BeginDebugInfo - Tells a cache to write generate debug info for support to process. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientBeginDebugInfoOptions contains the optional parameters for the CachesClient.BeginDebugInfo method. @@ -152,7 +152,7 @@ func (client *CachesClient) BeginDebugInfo(ctx context.Context, resourceGroupNam // DebugInfo - Tells a cache to write generate debug info for support to process. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) debugInfo(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginDebugInfoOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginDebugInfo" @@ -194,7 +194,7 @@ func (client *CachesClient) debugInfoCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -203,7 +203,7 @@ func (client *CachesClient) debugInfoCreateRequest(ctx context.Context, resource // BeginDelete - Schedules a cache for deletion. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientBeginDeleteOptions contains the optional parameters for the CachesClient.BeginDelete method. 
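The same begin/poll pattern applies to the cache long-running operations whose api-version is bumped here; below is a minimal sketch for `BeginDelete`, with placeholder names.

```go
package armstoragecache_test

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4"
)

// Sketch only: schedule a cache for deletion and wait for the operation to
// finish. The subscription ID, resource group, and cache name are placeholders.
func ExampleCachesClient_BeginDelete_sketch() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain a credential: %v", err)
	}
	ctx := context.Background()
	clientFactory, err := armstoragecache.NewClientFactory("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	poller, err := clientFactory.NewCachesClient().BeginDelete(ctx, "scgroup", "sc1", nil)
	if err != nil {
		log.Fatalf("failed to start the request: %v", err)
	}
	if _, err := poller.PollUntilDone(ctx, nil); err != nil {
		log.Fatalf("failed to poll the result: %v", err)
	}
}
```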
@@ -227,7 +227,7 @@ func (client *CachesClient) BeginDelete(ctx context.Context, resourceGroupName s // Delete - Schedules a cache for deletion. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) deleteOperation(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginDelete" @@ -269,7 +269,7 @@ func (client *CachesClient) deleteCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -279,7 +279,7 @@ func (client *CachesClient) deleteCreateRequest(ctx context.Context, resourceGro // returned until the flush is complete. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientBeginFlushOptions contains the optional parameters for the CachesClient.BeginFlush method. @@ -305,7 +305,7 @@ func (client *CachesClient) BeginFlush(ctx context.Context, resourceGroupName st // until the flush is complete. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) flush(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginFlushOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginFlush" @@ -347,7 +347,7 @@ func (client *CachesClient) flushCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -356,7 +356,7 @@ func (client *CachesClient) flushCreateRequest(ctx context.Context, resourceGrou // Get - Returns a cache. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientGetOptions contains the optional parameters for the CachesClient.Get method. @@ -402,7 +402,7 @@ func (client *CachesClient) getCreateRequest(ctx context.Context, resourceGroupN return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -419,7 +419,7 @@ func (client *CachesClient) getHandleResponse(resp *http.Response) (CachesClient // NewListPager - Returns all caches the user has access to under a subscription. 
// -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - options - CachesClientListOptions contains the optional parameters for the CachesClient.NewListPager method. func (client *CachesClient) NewListPager(options *CachesClientListOptions) *runtime.Pager[CachesClientListResponse] { return runtime.NewPager(runtime.PagingHandler[CachesClientListResponse]{ @@ -456,7 +456,7 @@ func (client *CachesClient) listCreateRequest(ctx context.Context, options *Cach return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -473,7 +473,7 @@ func (client *CachesClient) listHandleResponse(resp *http.Response) (CachesClien // NewListByResourceGroupPager - Returns all caches the user has access to under a resource group. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - options - CachesClientListByResourceGroupOptions contains the optional parameters for the CachesClient.NewListByResourceGroupPager // method. @@ -516,7 +516,7 @@ func (client *CachesClient) listByResourceGroupCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -534,7 +534,7 @@ func (client *CachesClient) listByResourceGroupHandleResponse(resp *http.Respons // BeginPausePrimingJob - Schedule a priming job to be paused. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientBeginPausePrimingJobOptions contains the optional parameters for the CachesClient.BeginPausePrimingJob @@ -560,7 +560,7 @@ func (client *CachesClient) BeginPausePrimingJob(ctx context.Context, resourceGr // PausePrimingJob - Schedule a priming job to be paused. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) pausePrimingJob(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginPausePrimingJobOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginPausePrimingJob" @@ -602,7 +602,7 @@ func (client *CachesClient) pausePrimingJobCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.PrimingJobID != nil { @@ -617,7 +617,7 @@ func (client *CachesClient) pausePrimingJobCreateRequest(ctx context.Context, re // BeginResumePrimingJob - Resumes a paused priming job. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientBeginResumePrimingJobOptions contains the optional parameters for the CachesClient.BeginResumePrimingJob @@ -643,7 +643,7 @@ func (client *CachesClient) BeginResumePrimingJob(ctx context.Context, resourceG // ResumePrimingJob - Resumes a paused priming job. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) resumePrimingJob(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginResumePrimingJobOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginResumePrimingJob" @@ -685,7 +685,7 @@ func (client *CachesClient) resumePrimingJobCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.PrimingJobID != nil { @@ -700,7 +700,7 @@ func (client *CachesClient) resumePrimingJobCreateRequest(ctx context.Context, r // BeginSpaceAllocation - Update cache space allocation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientBeginSpaceAllocationOptions contains the optional parameters for the CachesClient.BeginSpaceAllocation @@ -726,7 +726,7 @@ func (client *CachesClient) BeginSpaceAllocation(ctx context.Context, resourceGr // SpaceAllocation - Update cache space allocation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) spaceAllocation(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginSpaceAllocationOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginSpaceAllocation" @@ -768,7 +768,7 @@ func (client *CachesClient) spaceAllocationCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.SpaceAllocation != nil { @@ -783,7 +783,7 @@ func (client *CachesClient) spaceAllocationCreateRequest(ctx context.Context, re // BeginStart - Tells a Stopped state cache to transition to Active state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. 
// - options - CachesClientBeginStartOptions contains the optional parameters for the CachesClient.BeginStart method. @@ -808,7 +808,7 @@ func (client *CachesClient) BeginStart(ctx context.Context, resourceGroupName st // Start - Tells a Stopped state cache to transition to Active state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) start(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginStartOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginStart" @@ -850,7 +850,7 @@ func (client *CachesClient) startCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -859,7 +859,7 @@ func (client *CachesClient) startCreateRequest(ctx context.Context, resourceGrou // BeginStartPrimingJob - Create a priming job. This operation is only allowed when the cache is healthy. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientBeginStartPrimingJobOptions contains the optional parameters for the CachesClient.BeginStartPrimingJob @@ -885,7 +885,7 @@ func (client *CachesClient) BeginStartPrimingJob(ctx context.Context, resourceGr // StartPrimingJob - Create a priming job. This operation is only allowed when the cache is healthy. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) startPrimingJob(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginStartPrimingJobOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginStartPrimingJob" @@ -927,7 +927,7 @@ func (client *CachesClient) startPrimingJobCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.Primingjob != nil { @@ -942,7 +942,7 @@ func (client *CachesClient) startPrimingJobCreateRequest(ctx context.Context, re // BeginStop - Tells an Active cache to transition to Stopped state. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientBeginStopOptions contains the optional parameters for the CachesClient.BeginStop method. @@ -967,7 +967,7 @@ func (client *CachesClient) BeginStop(ctx context.Context, resourceGroupName str // Stop - Tells an Active cache to transition to Stopped state. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) stop(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginStopOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginStop" @@ -1009,7 +1009,7 @@ func (client *CachesClient) stopCreateRequest(ctx context.Context, resourceGroup return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -1018,7 +1018,7 @@ func (client *CachesClient) stopCreateRequest(ctx context.Context, resourceGroup // BeginStopPrimingJob - Schedule a priming job for deletion. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientBeginStopPrimingJobOptions contains the optional parameters for the CachesClient.BeginStopPrimingJob @@ -1044,7 +1044,7 @@ func (client *CachesClient) BeginStopPrimingJob(ctx context.Context, resourceGro // StopPrimingJob - Schedule a priming job for deletion. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) stopPrimingJob(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginStopPrimingJobOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginStopPrimingJob" @@ -1086,7 +1086,7 @@ func (client *CachesClient) stopPrimingJobCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.PrimingJobID != nil { @@ -1101,7 +1101,7 @@ func (client *CachesClient) stopPrimingJobCreateRequest(ctx context.Context, res // BeginUpdate - Update a cache instance. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - cache - Object containing the user-selectable properties of the cache. If read-only properties are included, they must @@ -1128,7 +1128,7 @@ func (client *CachesClient) BeginUpdate(ctx context.Context, resourceGroupName s // Update - Update a cache instance. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) update(ctx context.Context, resourceGroupName string, cacheName string, cache Cache, options *CachesClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginUpdate" @@ -1170,7 +1170,7 @@ func (client *CachesClient) updateCreateRequest(ctx context.Context, resourceGro return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, cache); err != nil { @@ -1182,7 +1182,7 @@ func (client *CachesClient) updateCreateRequest(ctx context.Context, resourceGro // BeginUpgradeFirmware - Upgrade a cache's firmware if a new version is available. Otherwise, this operation has no effect. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - options - CachesClientBeginUpgradeFirmwareOptions contains the optional parameters for the CachesClient.BeginUpgradeFirmware @@ -1208,7 +1208,7 @@ func (client *CachesClient) BeginUpgradeFirmware(ctx context.Context, resourceGr // UpgradeFirmware - Upgrade a cache's firmware if a new version is available. Otherwise, this operation has no effect. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *CachesClient) upgradeFirmware(ctx context.Context, resourceGroupName string, cacheName string, options *CachesClientBeginUpgradeFirmwareOptions) (*http.Response, error) { var err error const operationName = "CachesClient.BeginUpgradeFirmware" @@ -1250,7 +1250,7 @@ func (client *CachesClient) upgradeFirmwareCreateRequest(ctx context.Context, re return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/storagecache/armstoragecache/caches_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/caches_client_example_test.go deleted file mode 100644 index 9eb282f83640..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/caches_client_example_test.go +++ /dev/null @@ -1,1858 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armstoragecache_test - -import ( - "context" - "log" - - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_List.json -func ExampleCachesClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewCachesClient().NewListPager(nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.CachesListResult = armstoragecache.CachesListResult{ - // Value: []*armstoragecache.Cache{ - // { - // Name: to.Ptr("sc1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1"), - // Location: to.Ptr("westus"), - // Properties: &armstoragecache.CacheProperties{ - // CacheSizeGB: to.Ptr[int32](3072), - // DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - // ActiveDirectory: &armstoragecache.CacheActiveDirectorySettings{ - // CacheNetBiosName: to.Ptr("contosoSmb"), - // DomainJoined: to.Ptr(armstoragecache.DomainJoinedTypeYes), - // DomainName: to.Ptr("contosoAd.contoso.local"), - // DomainNetBiosName: to.Ptr("contosoAd"), - // PrimaryDNSIPAddress: to.Ptr("192.0.2.10"), - // SecondaryDNSIPAddress: to.Ptr("192.0.2.11"), - // }, - // UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - // AutoDownloadCertificate: to.Ptr(false), - // CaCertificateURI: to.Ptr("http://contoso.net/cacert.pem"), - // EncryptLdapConnection: to.Ptr(false), - // ExtendedGroups: to.Ptr(true), - // GroupFileURI: to.Ptr("http://contoso.net/group.file"), - // LdapBaseDN: to.Ptr("dc=contosoad,dc=contoso,dc=local"), - // LdapServer: to.Ptr("192.0.2.12"), - // RequireValidCertificate: to.Ptr(false), - // UserFileURI: to.Ptr("http://contoso.net/passwd.file"), - // UsernameDownloaded: to.Ptr(armstoragecache.UsernameDownloadedTypeYes), - // UsernameSource: to.Ptr(armstoragecache.UsernameSourceLDAP), - // }, - // }, - // EncryptionSettings: &armstoragecache.CacheEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://keyvault-cmk.vault.azure.net/keys/key2048/test"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // Health: &armstoragecache.CacheHealth{ - // Conditions: []*armstoragecache.Condition{ - // { - // Message: to.Ptr("Cannot contact DNS 
server"), - // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-21T18:25:43.511Z"); return t}()), - // }}, - // State: to.Ptr(armstoragecache.HealthStateTypeTransitioning), - // StatusDescription: to.Ptr("Cache is being created."), - // }, - // MountAddresses: []*string{ - // to.Ptr("192.168.1.1"), - // to.Ptr("192.168.1.2")}, - // NetworkSettings: &armstoragecache.CacheNetworkSettings{ - // DNSSearchDomain: to.Ptr("contoso.com"), - // DNSServers: []*string{ - // to.Ptr("10.1.22.33"), - // to.Ptr("10.1.12.33")}, - // Mtu: to.Ptr[int32](1500), - // NtpServer: to.Ptr("time.contoso.com"), - // }, - // PrimingJobs: []*armstoragecache.PrimingJob{ - // }, - // ProvisioningState: to.Ptr(armstoragecache.ProvisioningStateTypeSucceeded), - // SecuritySettings: &armstoragecache.CacheSecuritySettings{ - // AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - // { - // Name: to.Ptr("default"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(false), - // }}, - // }}, - // }, - // SpaceAllocation: []*armstoragecache.StorageTargetSpaceAllocation{ - // { - // Name: to.Ptr("st1"), - // AllocationPercentage: to.Ptr[int32](25), - // }, - // { - // Name: to.Ptr("st2"), - // AllocationPercentage: to.Ptr[int32](50), - // }, - // { - // Name: to.Ptr("st3"), - // AllocationPercentage: to.Ptr[int32](25), - // }}, - // Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - // UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - // ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t}()), - // UpgradeScheduleEnabled: to.Ptr(true), - // }, - // UpgradeStatus: &armstoragecache.CacheUpgradeStatus{ - // CurrentFirmwareVersion: to.Ptr("2022.08.1"), - // FirmwareUpdateDeadline: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // FirmwareUpdateStatus: to.Ptr(armstoragecache.FirmwareStatusTypeAvailable), - // LastFirmwareUpdate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-01-21T18:25:43.511Z"); return t}()), - // PendingFirmwareVersion: to.Ptr("2022.08.1"), - // }, - // }, - // SKU: &armstoragecache.CacheSKU{ - // Name: to.Ptr("Standard_2G"), - // }, - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Tags: map[string]*string{ - // "Dept": to.Ptr("Contoso"), - // }, - // }, - // { - // Name: to.Ptr("sc2"), - // Type: to.Ptr("Microsoft.StorageCache/Cache"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc2"), - // Location: to.Ptr("westus"), - // Properties: &armstoragecache.CacheProperties{ - // CacheSizeGB: to.Ptr[int32](3072), - // DirectoryServicesSettings: 
&armstoragecache.CacheDirectorySettings{ - // ActiveDirectory: &armstoragecache.CacheActiveDirectorySettings{ - // CacheNetBiosName: to.Ptr("contosoSmb"), - // DomainJoined: to.Ptr(armstoragecache.DomainJoinedTypeYes), - // DomainName: to.Ptr("contosoAd.contoso.local"), - // DomainNetBiosName: to.Ptr("contosoAd"), - // PrimaryDNSIPAddress: to.Ptr("192.0.2.10"), - // SecondaryDNSIPAddress: to.Ptr("192.0.2.11"), - // }, - // UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - // AutoDownloadCertificate: to.Ptr(false), - // CaCertificateURI: to.Ptr("http://contoso.net/cacert.pem"), - // EncryptLdapConnection: to.Ptr(false), - // ExtendedGroups: to.Ptr(true), - // GroupFileURI: to.Ptr("http://contoso.net/group.file"), - // LdapBaseDN: to.Ptr(""), - // LdapServer: to.Ptr(""), - // RequireValidCertificate: to.Ptr(false), - // UserFileURI: to.Ptr("http://contoso.net/passwd.file"), - // UsernameDownloaded: to.Ptr(armstoragecache.UsernameDownloadedTypeYes), - // UsernameSource: to.Ptr(armstoragecache.UsernameSourceAD), - // }, - // }, - // EncryptionSettings: &armstoragecache.CacheEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://keyvault-cmk.vault.azure.net/keys/key2048/test"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // Health: &armstoragecache.CacheHealth{ - // Conditions: []*armstoragecache.Condition{ - // { - // Message: to.Ptr("Cannot contact DNS server"), - // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-21T18:25:43.511Z"); return t}()), - // }}, - // State: to.Ptr(armstoragecache.HealthStateTypeTransitioning), - // StatusDescription: to.Ptr("Cache is being created."), - // }, - // MountAddresses: []*string{ - // to.Ptr("192.168.1.1"), - // to.Ptr("192.168.1.2")}, - // NetworkSettings: &armstoragecache.CacheNetworkSettings{ - // DNSSearchDomain: to.Ptr("contoso.com"), - // DNSServers: []*string{ - // to.Ptr("10.1.22.33"), - // to.Ptr("10.1.12.33")}, - // Mtu: to.Ptr[int32](1500), - // NtpServer: to.Ptr("time.contoso.com"), - // }, - // PrimingJobs: []*armstoragecache.PrimingJob{ - // { - // PrimingJobDetails: to.Ptr("Files: Cached=635, Failed=0, Excluded=80, Data=346030 bytes, Directories: Cached=1003, Failed=0, Excluded=0"), - // PrimingJobID: to.Ptr("00000000000_0000000000"), - // PrimingJobName: to.Ptr("contosoJob1"), - // PrimingJobPercentComplete: to.Ptr[float64](100), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateComplete), - // PrimingJobStatus: to.Ptr("success"), - // }, - // { - // PrimingJobDetails: to.Ptr(""), - // PrimingJobID: to.Ptr("11111111111_1111111111"), - // PrimingJobName: to.Ptr("contosoJob2"), - // PrimingJobPercentComplete: to.Ptr[float64](0), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateQueued), - // PrimingJobStatus: to.Ptr(""), - // }}, - // ProvisioningState: to.Ptr(armstoragecache.ProvisioningStateTypeSucceeded), - // SecuritySettings: &armstoragecache.CacheSecuritySettings{ - // AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - // { - // Name: to.Ptr("default"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: 
to.Ptr(false), - // }}, - // }}, - // }, - // SpaceAllocation: []*armstoragecache.StorageTargetSpaceAllocation{ - // }, - // Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub2"), - // UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - // ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t}()), - // UpgradeScheduleEnabled: to.Ptr(true), - // }, - // UpgradeStatus: &armstoragecache.CacheUpgradeStatus{ - // CurrentFirmwareVersion: to.Ptr("2022.08.1"), - // FirmwareUpdateDeadline: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // FirmwareUpdateStatus: to.Ptr(armstoragecache.FirmwareStatusTypeAvailable), - // LastFirmwareUpdate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-01-21T18:25:43.511Z"); return t}()), - // PendingFirmwareVersion: to.Ptr("2022.08.1"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // }, - // SKU: &armstoragecache.CacheSKU{ - // Name: to.Ptr("Standard_2G"), - // }, - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Tags: map[string]*string{ - // "Dept": to.Ptr("Contoso"), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_ListByResourceGroup.json -func ExampleCachesClient_NewListByResourceGroupPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewCachesClient().NewListByResourceGroupPager("scgroup", nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.CachesListResult = armstoragecache.CachesListResult{ - // Value: []*armstoragecache.Cache{ - // { - // Name: to.Ptr("sc1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1"), - // Location: to.Ptr("westus"), - // Properties: &armstoragecache.CacheProperties{ - // CacheSizeGB: to.Ptr[int32](3072), - // DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - // ActiveDirectory: &armstoragecache.CacheActiveDirectorySettings{ - // CacheNetBiosName: to.Ptr("contosoSmb"), - // DomainJoined: to.Ptr(armstoragecache.DomainJoinedTypeYes), - // DomainName: to.Ptr("contosoAd.contoso.local"), - // DomainNetBiosName: to.Ptr("contosoAd"), - // PrimaryDNSIPAddress: to.Ptr("192.0.2.10"), - // SecondaryDNSIPAddress: to.Ptr("192.0.2.11"), - // }, - // UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - // AutoDownloadCertificate: to.Ptr(false), - // CaCertificateURI: to.Ptr("http://contoso.net/cacert.pem"), - // EncryptLdapConnection: to.Ptr(false), - // ExtendedGroups: to.Ptr(true), - // GroupFileURI: to.Ptr("http://contoso.net/group.file"), - // LdapBaseDN: to.Ptr("dc=contosoad,dc=contoso,dc=local"), - // LdapServer: to.Ptr("192.0.2.12"), - // RequireValidCertificate: to.Ptr(false), - // UserFileURI: to.Ptr("http://contoso.net/passwd.file"), - // UsernameDownloaded: to.Ptr(armstoragecache.UsernameDownloadedTypeYes), - // UsernameSource: to.Ptr(armstoragecache.UsernameSourceLDAP), - // }, - // }, - // EncryptionSettings: &armstoragecache.CacheEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://keyvault-cmk.vault.azure.net/keys/key2048/test"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // Health: &armstoragecache.CacheHealth{ - // Conditions: []*armstoragecache.Condition{ - // { - // Message: to.Ptr("Cannot contact DNS server"), - // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-21T18:25:43.511Z"); return t}()), - // }}, - // State: to.Ptr(armstoragecache.HealthStateTypeTransitioning), - // StatusDescription: to.Ptr("Cache is being created."), - // }, - // MountAddresses: []*string{ - // to.Ptr("192.168.1.1"), - // to.Ptr("192.168.1.2")}, - // NetworkSettings: &armstoragecache.CacheNetworkSettings{ - // DNSSearchDomain: to.Ptr("contoso.com"), - // DNSServers: []*string{ - // to.Ptr("10.1.22.33"), - // to.Ptr("10.1.12.33")}, - // Mtu: to.Ptr[int32](1500), - // NtpServer: to.Ptr("time.contoso.com"), - // }, - // PrimingJobs: []*armstoragecache.PrimingJob{ - // }, - // ProvisioningState: to.Ptr(armstoragecache.ProvisioningStateTypeSucceeded), - // SecuritySettings: &armstoragecache.CacheSecuritySettings{ - // AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - // { - // Name: to.Ptr("default"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(false), - // }}, - // }}, - // }, - // SpaceAllocation: []*armstoragecache.StorageTargetSpaceAllocation{ - // { - // Name: to.Ptr("st1"), - // AllocationPercentage: to.Ptr[int32](25), - // }, - 
// { - // Name: to.Ptr("st2"), - // AllocationPercentage: to.Ptr[int32](50), - // }, - // { - // Name: to.Ptr("st3"), - // AllocationPercentage: to.Ptr[int32](25), - // }}, - // Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - // UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - // ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t}()), - // UpgradeScheduleEnabled: to.Ptr(true), - // }, - // UpgradeStatus: &armstoragecache.CacheUpgradeStatus{ - // CurrentFirmwareVersion: to.Ptr("2022.08.1"), - // FirmwareUpdateDeadline: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // FirmwareUpdateStatus: to.Ptr(armstoragecache.FirmwareStatusTypeAvailable), - // LastFirmwareUpdate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-01-21T18:25:43.511Z"); return t}()), - // PendingFirmwareVersion: to.Ptr("2022.08.1"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // }, - // SKU: &armstoragecache.CacheSKU{ - // Name: to.Ptr("Standard_2G"), - // }, - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Tags: map[string]*string{ - // "Dept": to.Ptr("Contoso"), - // }, - // }, - // { - // Name: to.Ptr("sc2"), - // Type: to.Ptr("Microsoft.StorageCache/Cache"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc2"), - // Location: to.Ptr("westus"), - // Properties: &armstoragecache.CacheProperties{ - // CacheSizeGB: to.Ptr[int32](3072), - // DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - // ActiveDirectory: &armstoragecache.CacheActiveDirectorySettings{ - // CacheNetBiosName: to.Ptr("contosoSmb"), - // DomainJoined: to.Ptr(armstoragecache.DomainJoinedTypeYes), - // DomainName: to.Ptr("contosoAd.contoso.local"), - // DomainNetBiosName: to.Ptr("contosoAd"), - // PrimaryDNSIPAddress: to.Ptr("192.0.2.10"), - // SecondaryDNSIPAddress: to.Ptr("192.0.2.11"), - // }, - // UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - // AutoDownloadCertificate: to.Ptr(false), - // CaCertificateURI: to.Ptr("http://contoso.net/cacert.pem"), - // EncryptLdapConnection: to.Ptr(false), - // ExtendedGroups: to.Ptr(true), - // GroupFileURI: to.Ptr("http://contoso.net/group.file"), - // LdapBaseDN: to.Ptr(""), - // LdapServer: to.Ptr(""), - // RequireValidCertificate: to.Ptr(false), - // UserFileURI: to.Ptr("http://contoso.net/passwd.file"), - // UsernameDownloaded: to.Ptr(armstoragecache.UsernameDownloadedTypeYes), - // UsernameSource: to.Ptr(armstoragecache.UsernameSourceAD), - // }, - // }, - // EncryptionSettings: &armstoragecache.CacheEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://keyvault-cmk.vault.azure.net/keys/key2048/test"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: 
to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // Health: &armstoragecache.CacheHealth{ - // Conditions: []*armstoragecache.Condition{ - // { - // Message: to.Ptr("Cannot contact DNS server"), - // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-21T18:25:43.511Z"); return t}()), - // }}, - // State: to.Ptr(armstoragecache.HealthStateTypeTransitioning), - // StatusDescription: to.Ptr("Cache is being created."), - // }, - // MountAddresses: []*string{ - // to.Ptr("192.168.1.1"), - // to.Ptr("192.168.1.2")}, - // NetworkSettings: &armstoragecache.CacheNetworkSettings{ - // DNSSearchDomain: to.Ptr("contoso.com"), - // DNSServers: []*string{ - // to.Ptr("10.1.22.33"), - // to.Ptr("10.1.12.33")}, - // Mtu: to.Ptr[int32](1500), - // NtpServer: to.Ptr("time.contoso.com"), - // }, - // PrimingJobs: []*armstoragecache.PrimingJob{ - // { - // PrimingJobDetails: to.Ptr("Files: Cached=635, Failed=0, Excluded=80, Data=346030 bytes, Directories: Cached=1003, Failed=0, Excluded=0"), - // PrimingJobID: to.Ptr("00000000000_0000000000"), - // PrimingJobName: to.Ptr("contosoJob1"), - // PrimingJobPercentComplete: to.Ptr[float64](100), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateComplete), - // PrimingJobStatus: to.Ptr("success"), - // }, - // { - // PrimingJobDetails: to.Ptr(""), - // PrimingJobID: to.Ptr("11111111111_1111111111"), - // PrimingJobName: to.Ptr("contosoJob2"), - // PrimingJobPercentComplete: to.Ptr[float64](0), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateQueued), - // PrimingJobStatus: to.Ptr(""), - // }}, - // ProvisioningState: to.Ptr(armstoragecache.ProvisioningStateTypeSucceeded), - // SecuritySettings: &armstoragecache.CacheSecuritySettings{ - // AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - // { - // Name: to.Ptr("default"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(false), - // }}, - // }}, - // }, - // SpaceAllocation: []*armstoragecache.StorageTargetSpaceAllocation{ - // }, - // Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub2"), - // UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - // ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t}()), - // UpgradeScheduleEnabled: to.Ptr(true), - // }, - // UpgradeStatus: &armstoragecache.CacheUpgradeStatus{ - // CurrentFirmwareVersion: to.Ptr("2022.08.1"), - // FirmwareUpdateDeadline: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // FirmwareUpdateStatus: to.Ptr(armstoragecache.FirmwareStatusTypeAvailable), - // LastFirmwareUpdate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-01-21T18:25:43.511Z"); return t}()), - // PendingFirmwareVersion: to.Ptr("2022.08.1"), - // }, - // Zones: []*string{ - // to.Ptr("2")}, - // }, - // SKU: &armstoragecache.CacheSKU{ - // Name: to.Ptr("Standard_2G"), - // }, - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: 
to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Tags: map[string]*string{ - // "Dept": to.Ptr("Contoso"), - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_Delete.json -func ExampleCachesClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginDelete(ctx, "scgroup", "sc", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_Get.json -func ExampleCachesClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewCachesClient().Get(ctx, "scgroup", "sc1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.Cache = armstoragecache.Cache{ - // Name: to.Ptr("sc1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1"), - // Location: to.Ptr("westus"), - // Properties: &armstoragecache.CacheProperties{ - // CacheSizeGB: to.Ptr[int32](3072), - // DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - // ActiveDirectory: &armstoragecache.CacheActiveDirectorySettings{ - // CacheNetBiosName: to.Ptr("contosoSmb"), - // DomainJoined: to.Ptr(armstoragecache.DomainJoinedTypeYes), - // DomainName: to.Ptr("contosoAd.contoso.local"), - // DomainNetBiosName: to.Ptr("contosoAd"), - // PrimaryDNSIPAddress: to.Ptr("192.0.2.10"), - // SecondaryDNSIPAddress: to.Ptr("192.0.2.11"), - // }, - // UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - // AutoDownloadCertificate: to.Ptr(false), - // CaCertificateURI: to.Ptr("http://contoso.net/cacert.pem"), - // EncryptLdapConnection: to.Ptr(false), - // ExtendedGroups: to.Ptr(true), - // GroupFileURI: to.Ptr("http://contoso.net/group.file"), - // LdapBaseDN: to.Ptr("dc=contosoad,dc=contoso,dc=local"), - // LdapServer: to.Ptr("192.0.2.12"), - // RequireValidCertificate: to.Ptr(false), - // UserFileURI: to.Ptr("http://contoso.net/passwd.file"), - // UsernameDownloaded: to.Ptr(armstoragecache.UsernameDownloadedTypeYes), - // UsernameSource: to.Ptr(armstoragecache.UsernameSourceLDAP), - // }, - // }, - // EncryptionSettings: &armstoragecache.CacheEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://keyvault-cmk.vault.azure.net/keys/key2048/test"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // Health: &armstoragecache.CacheHealth{ - // Conditions: []*armstoragecache.Condition{ - // { - // Message: to.Ptr("Cannot contact DNS server"), - // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-21T18:25:43.511Z"); return t}()), - // }}, - // State: to.Ptr(armstoragecache.HealthStateTypeTransitioning), - // StatusDescription: to.Ptr("Cache is being created."), - // }, - // MountAddresses: []*string{ - // to.Ptr("192.168.1.1"), - // to.Ptr("192.168.1.2")}, - // NetworkSettings: &armstoragecache.CacheNetworkSettings{ - // DNSSearchDomain: to.Ptr("contoso.com"), - // DNSServers: []*string{ - // to.Ptr("10.1.22.33"), - // to.Ptr("10.1.12.33")}, - // Mtu: to.Ptr[int32](1500), - // NtpServer: to.Ptr("time.contoso.com"), - // }, - // PrimingJobs: []*armstoragecache.PrimingJob{ - // { - // PrimingJobDetails: to.Ptr("Files: Cached=635, Failed=0, Excluded=80, Data=346030 bytes, Directories: Cached=1003, Failed=0, Excluded=0"), - // PrimingJobID: to.Ptr("00000000000_0000000000"), - // PrimingJobName: to.Ptr("contosoJob1"), - // PrimingJobPercentComplete: to.Ptr[float64](100), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateComplete), - // PrimingJobStatus: to.Ptr("success"), - // }, - // { - // PrimingJobDetails: to.Ptr(""), - // PrimingJobID: to.Ptr("11111111111_1111111111"), - // PrimingJobName: to.Ptr("contosoJob2"), - // PrimingJobPercentComplete: to.Ptr[float64](0), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateQueued), - // PrimingJobStatus: to.Ptr(""), - // }}, - // ProvisioningState: 
to.Ptr(armstoragecache.ProvisioningStateTypeSucceeded), - // SecuritySettings: &armstoragecache.CacheSecuritySettings{ - // AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - // { - // Name: to.Ptr("default"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(false), - // }}, - // }}, - // }, - // SpaceAllocation: []*armstoragecache.StorageTargetSpaceAllocation{ - // { - // Name: to.Ptr("st1"), - // AllocationPercentage: to.Ptr[int32](25), - // }, - // { - // Name: to.Ptr("st2"), - // AllocationPercentage: to.Ptr[int32](50), - // }, - // { - // Name: to.Ptr("st3"), - // AllocationPercentage: to.Ptr[int32](25), - // }}, - // Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - // UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - // ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t}()), - // UpgradeScheduleEnabled: to.Ptr(true), - // }, - // UpgradeStatus: &armstoragecache.CacheUpgradeStatus{ - // CurrentFirmwareVersion: to.Ptr("2022.08.1"), - // FirmwareUpdateDeadline: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // FirmwareUpdateStatus: to.Ptr(armstoragecache.FirmwareStatusTypeAvailable), - // LastFirmwareUpdate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-01-21T18:25:43.511Z"); return t}()), - // PendingFirmwareVersion: to.Ptr("2022.08.1"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // }, - // SKU: &armstoragecache.CacheSKU{ - // Name: to.Ptr("Standard_2G"), - // }, - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Tags: map[string]*string{ - // "Dept": to.Ptr("Contoso"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_CreateOrUpdate.json -func ExampleCachesClient_BeginCreateOrUpdate_cachesCreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginCreateOrUpdate(ctx, "scgroup", "sc1", armstoragecache.Cache{ - Identity: &armstoragecache.CacheIdentity{ - Type: to.Ptr(armstoragecache.CacheIdentityTypeUserAssigned), - UserAssignedIdentities: map[string]*armstoragecache.UserAssignedIdentitiesValue{ - 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/identity1": {}, - }, - }, - Location: to.Ptr("westus"), - Properties: &armstoragecache.CacheProperties{ - CacheSizeGB: to.Ptr[int32](3072), - DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - ActiveDirectory: &armstoragecache.CacheActiveDirectorySettings{ - CacheNetBiosName: to.Ptr("contosoSmb"), - Credentials: &armstoragecache.CacheActiveDirectorySettingsCredentials{ - Password: to.Ptr(""), - Username: to.Ptr("consotoAdmin"), - }, - DomainName: to.Ptr("contosoAd.contoso.local"), - DomainNetBiosName: to.Ptr("contosoAd"), - PrimaryDNSIPAddress: to.Ptr("192.0.2.10"), - SecondaryDNSIPAddress: to.Ptr("192.0.2.11"), - }, - UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - Credentials: &armstoragecache.CacheUsernameDownloadSettingsCredentials{ - BindDn: to.Ptr("cn=ldapadmin,dc=contosoad,dc=contoso,dc=local"), - BindPassword: to.Ptr(""), - }, - ExtendedGroups: to.Ptr(true), - LdapBaseDN: to.Ptr("dc=contosoad,dc=contoso,dc=local"), - LdapServer: to.Ptr("192.0.2.12"), - UsernameSource: to.Ptr(armstoragecache.UsernameSourceLDAP), - }, - }, - EncryptionSettings: &armstoragecache.CacheEncryptionSettings{ - KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - KeyURL: to.Ptr("https://keyvault-cmk.vault.azure.net/keys/key2047/test"), - SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - }, - }, - }, - SecuritySettings: &armstoragecache.CacheSecuritySettings{ - AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - { - Name: to.Ptr("default"), - AccessRules: []*armstoragecache.NfsAccessRule{ - { - Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - RootSquash: to.Ptr(false), - Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - SubmountAccess: to.Ptr(true), - Suid: to.Ptr(false), - }}, - }}, - }, - Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t }()), - UpgradeScheduleEnabled: to.Ptr(true), - }, - }, - SKU: &armstoragecache.CacheSKU{ - Name: to.Ptr("Standard_2G"), - }, - Tags: map[string]*string{ - "Dept": to.Ptr("Contoso"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.Cache = armstoragecache.Cache{ - // Name: to.Ptr("sc1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1"), - // Location: to.Ptr("westus"), - // Properties: &armstoragecache.CacheProperties{ - // CacheSizeGB: to.Ptr[int32](3072), - // DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - // ActiveDirectory: &armstoragecache.CacheActiveDirectorySettings{ - // CacheNetBiosName: to.Ptr("contosoSmb"), - // DomainJoined: to.Ptr(armstoragecache.DomainJoinedTypeYes), - // DomainName: to.Ptr("contosoAd.contoso.local"), - // DomainNetBiosName: to.Ptr("contosoAd"), - // PrimaryDNSIPAddress: to.Ptr("192.0.2.10"), - // SecondaryDNSIPAddress: to.Ptr("192.0.2.11"), - // }, - // UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - // AutoDownloadCertificate: to.Ptr(false), - // CaCertificateURI: to.Ptr("http://contoso.net/cacert.pem"), - // EncryptLdapConnection: to.Ptr(false), - // ExtendedGroups: to.Ptr(true), - // GroupFileURI: to.Ptr("http://contoso.net/group.file"), - // LdapBaseDN: to.Ptr("dc=contosoad,dc=contoso,dc=local"), - // LdapServer: to.Ptr("192.0.2.12"), - // RequireValidCertificate: to.Ptr(false), - // UserFileURI: to.Ptr("http://contoso.net/passwd.file"), - // UsernameDownloaded: to.Ptr(armstoragecache.UsernameDownloadedTypeYes), - // UsernameSource: to.Ptr(armstoragecache.UsernameSourceLDAP), - // }, - // }, - // EncryptionSettings: &armstoragecache.CacheEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://keyvault-cmk.vault.azure.net/keys/key2048/test"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // Health: &armstoragecache.CacheHealth{ - // Conditions: []*armstoragecache.Condition{ - // { - // Message: to.Ptr("Cannot contact DNS server"), - // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-21T18:25:43.511Z"); return t}()), - // }}, - // State: to.Ptr(armstoragecache.HealthStateTypeTransitioning), - // StatusDescription: to.Ptr("Cache is being created."), - // }, - // MountAddresses: []*string{ - // to.Ptr("192.168.1.1"), - // to.Ptr("192.168.1.2")}, - // NetworkSettings: &armstoragecache.CacheNetworkSettings{ - // DNSSearchDomain: to.Ptr("contoso.com"), - // DNSServers: []*string{ - // to.Ptr("10.1.22.33"), - // to.Ptr("10.1.12.33")}, - // Mtu: to.Ptr[int32](1500), - // NtpServer: to.Ptr("time.contoso.com"), - // }, - // PrimingJobs: []*armstoragecache.PrimingJob{ - // { - // PrimingJobDetails: to.Ptr("Files: Cached=635, Failed=0, Excluded=80, Data=346030 bytes, Directories: Cached=1003, Failed=0, Excluded=0"), - // PrimingJobID: to.Ptr("00000000000_0000000000"), - // PrimingJobName: to.Ptr("contosoJob1"), - // PrimingJobPercentComplete: to.Ptr[float64](100), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateComplete), - // PrimingJobStatus: to.Ptr("success"), - // }, - // { - // PrimingJobDetails: to.Ptr(""), - // PrimingJobID: to.Ptr("11111111111_1111111111"), - // PrimingJobName: to.Ptr("contosoJob2"), - // PrimingJobPercentComplete: to.Ptr[float64](0), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateQueued), - // PrimingJobStatus: to.Ptr(""), - // }}, - // ProvisioningState: 
to.Ptr(armstoragecache.ProvisioningStateTypeSucceeded), - // SecuritySettings: &armstoragecache.CacheSecuritySettings{ - // AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - // { - // Name: to.Ptr("default"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(false), - // }}, - // }}, - // }, - // SpaceAllocation: []*armstoragecache.StorageTargetSpaceAllocation{ - // }, - // Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - // UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - // ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t}()), - // UpgradeScheduleEnabled: to.Ptr(true), - // }, - // UpgradeStatus: &armstoragecache.CacheUpgradeStatus{ - // CurrentFirmwareVersion: to.Ptr("2022.08.1"), - // FirmwareUpdateDeadline: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // FirmwareUpdateStatus: to.Ptr(armstoragecache.FirmwareStatusTypeAvailable), - // LastFirmwareUpdate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-01-21T18:25:43.511Z"); return t}()), - // PendingFirmwareVersion: to.Ptr("2022.08.1"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // }, - // SKU: &armstoragecache.CacheSKU{ - // Name: to.Ptr("Standard_2G"), - // }, - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Tags: map[string]*string{ - // "Dept": to.Ptr("Contoso"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_CreateOrUpdate_ldap_only.json -func ExampleCachesClient_BeginCreateOrUpdate_cachesCreateOrUpdateLdapOnly() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginCreateOrUpdate(ctx, "scgroup", "sc1", armstoragecache.Cache{ - Location: to.Ptr("westus"), - Properties: &armstoragecache.CacheProperties{ - CacheSizeGB: to.Ptr[int32](3072), - DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - Credentials: &armstoragecache.CacheUsernameDownloadSettingsCredentials{ - BindDn: to.Ptr("cn=ldapadmin,dc=contosoad,dc=contoso,dc=local"), - BindPassword: to.Ptr(""), - }, - ExtendedGroups: to.Ptr(true), - LdapBaseDN: to.Ptr("dc=contosoad,dc=contoso,dc=local"), - LdapServer: to.Ptr("192.0.2.12"), - UsernameSource: 
to.Ptr(armstoragecache.UsernameSourceLDAP), - }, - }, - EncryptionSettings: &armstoragecache.CacheEncryptionSettings{ - KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - KeyURL: to.Ptr("https://keyvault-cmk.vault.azure.net/keys/key2048/test"), - SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - }, - }, - }, - SecuritySettings: &armstoragecache.CacheSecuritySettings{ - AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - { - Name: to.Ptr("default"), - AccessRules: []*armstoragecache.NfsAccessRule{ - { - Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - RootSquash: to.Ptr(false), - Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - SubmountAccess: to.Ptr(true), - Suid: to.Ptr(false), - }}, - }}, - }, - Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t }()), - UpgradeScheduleEnabled: to.Ptr(true), - }, - }, - SKU: &armstoragecache.CacheSKU{ - Name: to.Ptr("Standard_2G"), - }, - Tags: map[string]*string{ - "Dept": to.Ptr("Contoso"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.Cache = armstoragecache.Cache{ - // Name: to.Ptr("sc1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1"), - // Location: to.Ptr("westus"), - // Properties: &armstoragecache.CacheProperties{ - // CacheSizeGB: to.Ptr[int32](3072), - // DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - // UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - // AutoDownloadCertificate: to.Ptr(false), - // CaCertificateURI: to.Ptr("http://contoso.net/cacert.pem"), - // EncryptLdapConnection: to.Ptr(false), - // ExtendedGroups: to.Ptr(true), - // GroupFileURI: to.Ptr("http://contoso.net/group.file"), - // LdapBaseDN: to.Ptr("dc=contosoad,dc=contoso,dc=local"), - // LdapServer: to.Ptr("192.0.2.12"), - // RequireValidCertificate: to.Ptr(false), - // UserFileURI: to.Ptr("http://contoso.net/passwd.file"), - // UsernameDownloaded: to.Ptr(armstoragecache.UsernameDownloadedTypeYes), - // UsernameSource: to.Ptr(armstoragecache.UsernameSourceLDAP), - // }, - // }, - // EncryptionSettings: &armstoragecache.CacheEncryptionSettings{ - // KeyEncryptionKey: &armstoragecache.KeyVaultKeyReference{ - // KeyURL: to.Ptr("https://keyvault-cmk.vault.azure.net/keys/key2048/test"), - // SourceVault: &armstoragecache.KeyVaultKeyReferenceSourceVault{ - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.KeyVault/vaults/keyvault-cmk"), - // }, - // }, - // }, - // Health: &armstoragecache.CacheHealth{ - // Conditions: []*armstoragecache.Condition{ - // { - // Message: to.Ptr("Cannot contact DNS server"), - // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-21T18:25:43.511Z"); return t}()), - // }}, - // State: to.Ptr(armstoragecache.HealthStateTypeTransitioning), - // StatusDescription: to.Ptr("Cache is being created."), - // }, - // MountAddresses: []*string{ - // to.Ptr("192.168.1.1"), - // to.Ptr("192.168.1.2")}, - // NetworkSettings: &armstoragecache.CacheNetworkSettings{ - // DNSSearchDomain: to.Ptr("contoso.com"), - // DNSServers: []*string{ - // to.Ptr("10.1.22.33"), - // to.Ptr("10.1.12.33")}, - // Mtu: to.Ptr[int32](1500), - // NtpServer: to.Ptr("time.contoso.com"), - // }, - // PrimingJobs: []*armstoragecache.PrimingJob{ - // { - // PrimingJobDetails: to.Ptr("Files: Cached=635, Failed=0, Excluded=80, Data=346030 bytes, Directories: Cached=1003, Failed=0, Excluded=0"), - // PrimingJobID: to.Ptr("00000000000_0000000000"), - // PrimingJobName: to.Ptr("contosoJob1"), - // PrimingJobPercentComplete: to.Ptr[float64](100), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateComplete), - // PrimingJobStatus: to.Ptr("success"), - // }, - // { - // PrimingJobDetails: to.Ptr(""), - // PrimingJobID: to.Ptr("11111111111_1111111111"), - // PrimingJobName: to.Ptr("contosoJob2"), - // PrimingJobPercentComplete: to.Ptr[float64](0), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateQueued), - // PrimingJobStatus: to.Ptr(""), - // }}, - // ProvisioningState: to.Ptr(armstoragecache.ProvisioningStateTypeSucceeded), - // SecuritySettings: &armstoragecache.CacheSecuritySettings{ - // AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - // { - // Name: to.Ptr("default"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // RootSquash: to.Ptr(false), - // Scope: 
to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(false), - // }}, - // }}, - // }, - // SpaceAllocation: []*armstoragecache.StorageTargetSpaceAllocation{ - // }, - // Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - // UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - // ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t}()), - // UpgradeScheduleEnabled: to.Ptr(true), - // }, - // UpgradeStatus: &armstoragecache.CacheUpgradeStatus{ - // CurrentFirmwareVersion: to.Ptr("2022.08.1"), - // FirmwareUpdateDeadline: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // FirmwareUpdateStatus: to.Ptr(armstoragecache.FirmwareStatusTypeAvailable), - // LastFirmwareUpdate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-01-21T18:25:43.511Z"); return t}()), - // PendingFirmwareVersion: to.Ptr("2022.08.1"), - // }, - // Zones: []*string{ - // to.Ptr("1")}, - // }, - // SKU: &armstoragecache.CacheSKU{ - // Name: to.Ptr("Standard_2G"), - // }, - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Tags: map[string]*string{ - // "Dept": to.Ptr("Contoso"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_Update.json -func ExampleCachesClient_BeginUpdate_cachesUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginUpdate(ctx, "scgroup", "sc1", armstoragecache.Cache{ - Location: to.Ptr("westus"), - Properties: &armstoragecache.CacheProperties{ - CacheSizeGB: to.Ptr[int32](3072), - DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - ActiveDirectory: &armstoragecache.CacheActiveDirectorySettings{ - CacheNetBiosName: to.Ptr("contosoSmb"), - DomainName: to.Ptr("contosoAd.contoso.local"), - DomainNetBiosName: to.Ptr("contosoAd"), - PrimaryDNSIPAddress: to.Ptr("192.0.2.10"), - SecondaryDNSIPAddress: to.Ptr("192.0.2.11"), - }, - UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - ExtendedGroups: to.Ptr(true), - UsernameSource: to.Ptr(armstoragecache.UsernameSourceAD), - }, - }, - NetworkSettings: &armstoragecache.CacheNetworkSettings{ - DNSSearchDomain: to.Ptr("contoso.com"), - DNSServers: []*string{ - to.Ptr("10.1.22.33"), - to.Ptr("10.1.12.33")}, - Mtu: to.Ptr[int32](1500), - NtpServer: to.Ptr("time.contoso.com"), - }, - SecuritySettings: &armstoragecache.CacheSecuritySettings{ - AccessPolicies: 
[]*armstoragecache.NfsAccessPolicy{ - { - Name: to.Ptr("default"), - AccessRules: []*armstoragecache.NfsAccessRule{ - { - Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - RootSquash: to.Ptr(false), - Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - SubmountAccess: to.Ptr(true), - Suid: to.Ptr(false), - }}, - }, - { - Name: to.Ptr("restrictive"), - AccessRules: []*armstoragecache.NfsAccessRule{ - { - Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - Filter: to.Ptr("10.99.3.145"), - RootSquash: to.Ptr(false), - Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeHost), - SubmountAccess: to.Ptr(true), - Suid: to.Ptr(true), - }, - { - Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - Filter: to.Ptr("10.99.1.0/24"), - RootSquash: to.Ptr(false), - Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeNetwork), - SubmountAccess: to.Ptr(true), - Suid: to.Ptr(true), - }, - { - Access: to.Ptr(armstoragecache.NfsAccessRuleAccessNo), - AnonymousGID: to.Ptr("65534"), - AnonymousUID: to.Ptr("65534"), - RootSquash: to.Ptr(true), - Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - SubmountAccess: to.Ptr(true), - Suid: to.Ptr(false), - }}, - }}, - }, - Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t }()), - UpgradeScheduleEnabled: to.Ptr(true), - }, - }, - SKU: &armstoragecache.CacheSKU{ - Name: to.Ptr("Standard_2G"), - }, - Tags: map[string]*string{ - "Dept": to.Ptr("Contoso"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.Cache = armstoragecache.Cache{ - // Name: to.Ptr("sc1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1"), - // Location: to.Ptr("westus"), - // Properties: &armstoragecache.CacheProperties{ - // CacheSizeGB: to.Ptr[int32](3072), - // DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - // ActiveDirectory: &armstoragecache.CacheActiveDirectorySettings{ - // CacheNetBiosName: to.Ptr("contosoSmb"), - // DomainJoined: to.Ptr(armstoragecache.DomainJoinedTypeYes), - // DomainName: to.Ptr("contosoAd.contoso.local"), - // DomainNetBiosName: to.Ptr("contosoAd"), - // PrimaryDNSIPAddress: to.Ptr("192.0.2.10"), - // SecondaryDNSIPAddress: to.Ptr("192.0.2.11"), - // }, - // UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - // AutoDownloadCertificate: to.Ptr(false), - // CaCertificateURI: to.Ptr("http://contoso.net/cacert.pem"), - // EncryptLdapConnection: to.Ptr(false), - // ExtendedGroups: to.Ptr(true), - // GroupFileURI: to.Ptr("http://contoso.net/group.file"), - // LdapBaseDN: to.Ptr(""), - // LdapServer: to.Ptr(""), - // RequireValidCertificate: to.Ptr(false), - // UserFileURI: to.Ptr("http://contoso.net/passwd.file"), - // UsernameDownloaded: to.Ptr(armstoragecache.UsernameDownloadedTypeYes), - // UsernameSource: to.Ptr(armstoragecache.UsernameSourceAD), - // }, - // }, - // Health: &armstoragecache.CacheHealth{ - // Conditions: []*armstoragecache.Condition{ - // { - // Message: to.Ptr("Cannot contact DNS server"), - // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-21T18:25:43.511Z"); return t}()), - // }}, - // State: to.Ptr(armstoragecache.HealthStateTypeTransitioning), - // StatusDescription: to.Ptr("Cache is being created."), - // }, - // MountAddresses: []*string{ - // to.Ptr("192.168.1.1"), - // to.Ptr("192.168.1.2")}, - // NetworkSettings: &armstoragecache.CacheNetworkSettings{ - // DNSSearchDomain: to.Ptr("contoso.com"), - // DNSServers: []*string{ - // to.Ptr("10.1.22.33"), - // to.Ptr("10.1.12.33")}, - // Mtu: to.Ptr[int32](1500), - // NtpServer: to.Ptr("time.contoso.com"), - // }, - // PrimingJobs: []*armstoragecache.PrimingJob{ - // { - // PrimingJobDetails: to.Ptr("Files: Cached=635, Failed=0, Excluded=80, Data=346030 bytes, Directories: Cached=1003, Failed=0, Excluded=0"), - // PrimingJobID: to.Ptr("00000000000_0000000000"), - // PrimingJobName: to.Ptr("contosoJob1"), - // PrimingJobPercentComplete: to.Ptr[float64](100), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateComplete), - // PrimingJobStatus: to.Ptr("success"), - // }, - // { - // PrimingJobDetails: to.Ptr(""), - // PrimingJobID: to.Ptr("11111111111_1111111111"), - // PrimingJobName: to.Ptr("contosoJob2"), - // PrimingJobPercentComplete: to.Ptr[float64](0), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateQueued), - // PrimingJobStatus: to.Ptr(""), - // }}, - // ProvisioningState: to.Ptr(armstoragecache.ProvisioningStateTypeSucceeded), - // SecuritySettings: &armstoragecache.CacheSecuritySettings{ - // AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - // { - // Name: to.Ptr("default"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(false), - // }}, - // }, - // { 
- // Name: to.Ptr("restrictive"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // Filter: to.Ptr("10.99.3.145"), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeHost), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(true), - // }, - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // Filter: to.Ptr("10.99.1.0/24"), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeNetwork), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(true), - // }, - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessNo), - // AnonymousGID: to.Ptr("65534"), - // AnonymousUID: to.Ptr("65534"), - // RootSquash: to.Ptr(true), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(false), - // }}, - // }}, - // }, - // SpaceAllocation: []*armstoragecache.StorageTargetSpaceAllocation{ - // { - // Name: to.Ptr("st1"), - // AllocationPercentage: to.Ptr[int32](25), - // }, - // { - // Name: to.Ptr("st2"), - // AllocationPercentage: to.Ptr[int32](50), - // }, - // { - // Name: to.Ptr("st3"), - // AllocationPercentage: to.Ptr[int32](25), - // }}, - // Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - // UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - // ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t}()), - // UpgradeScheduleEnabled: to.Ptr(true), - // }, - // UpgradeStatus: &armstoragecache.CacheUpgradeStatus{ - // CurrentFirmwareVersion: to.Ptr("2022.08.1"), - // FirmwareUpdateDeadline: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // FirmwareUpdateStatus: to.Ptr(armstoragecache.FirmwareStatusTypeAvailable), - // LastFirmwareUpdate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-01-21T18:25:43.511Z"); return t}()), - // PendingFirmwareVersion: to.Ptr("2022.08.1"), - // }, - // }, - // SKU: &armstoragecache.CacheSKU{ - // Name: to.Ptr("Standard_2G"), - // }, - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Tags: map[string]*string{ - // "Dept": to.Ptr("Contoso"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_Update_ldap_only.json -func ExampleCachesClient_BeginUpdate_cachesUpdateLdapOnly() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginUpdate(ctx, 
"scgroup", "sc1", armstoragecache.Cache{ - Location: to.Ptr("westus"), - Properties: &armstoragecache.CacheProperties{ - CacheSizeGB: to.Ptr[int32](3072), - DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - Credentials: &armstoragecache.CacheUsernameDownloadSettingsCredentials{ - BindDn: to.Ptr("cn=ldapadmin,dc=contosoad,dc=contoso,dc=local"), - BindPassword: to.Ptr(""), - }, - ExtendedGroups: to.Ptr(true), - LdapBaseDN: to.Ptr("dc=contosoad,dc=contoso,dc=local"), - LdapServer: to.Ptr("192.0.2.12"), - UsernameSource: to.Ptr(armstoragecache.UsernameSourceLDAP), - }, - }, - NetworkSettings: &armstoragecache.CacheNetworkSettings{ - DNSSearchDomain: to.Ptr("contoso.com"), - DNSServers: []*string{ - to.Ptr("10.1.22.33"), - to.Ptr("10.1.12.33")}, - Mtu: to.Ptr[int32](1500), - NtpServer: to.Ptr("time.contoso.com"), - }, - SecuritySettings: &armstoragecache.CacheSecuritySettings{ - AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - { - Name: to.Ptr("default"), - AccessRules: []*armstoragecache.NfsAccessRule{ - { - Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - RootSquash: to.Ptr(false), - Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - SubmountAccess: to.Ptr(true), - Suid: to.Ptr(false), - }}, - }, - { - Name: to.Ptr("restrictive"), - AccessRules: []*armstoragecache.NfsAccessRule{ - { - Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - Filter: to.Ptr("10.99.3.145"), - RootSquash: to.Ptr(false), - Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeHost), - SubmountAccess: to.Ptr(true), - Suid: to.Ptr(true), - }, - { - Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - Filter: to.Ptr("10.99.1.0/24"), - RootSquash: to.Ptr(false), - Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeNetwork), - SubmountAccess: to.Ptr(true), - Suid: to.Ptr(true), - }, - { - Access: to.Ptr(armstoragecache.NfsAccessRuleAccessNo), - AnonymousGID: to.Ptr("65534"), - AnonymousUID: to.Ptr("65534"), - RootSquash: to.Ptr(true), - Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - SubmountAccess: to.Ptr(true), - Suid: to.Ptr(false), - }}, - }}, - }, - Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t }()), - UpgradeScheduleEnabled: to.Ptr(true), - }, - }, - SKU: &armstoragecache.CacheSKU{ - Name: to.Ptr("Standard_2G"), - }, - Tags: map[string]*string{ - "Dept": to.Ptr("Contoso"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.Cache = armstoragecache.Cache{ - // Name: to.Ptr("sc1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1"), - // Location: to.Ptr("westus"), - // Properties: &armstoragecache.CacheProperties{ - // CacheSizeGB: to.Ptr[int32](3072), - // DirectoryServicesSettings: &armstoragecache.CacheDirectorySettings{ - // UsernameDownload: &armstoragecache.CacheUsernameDownloadSettings{ - // AutoDownloadCertificate: to.Ptr(false), - // CaCertificateURI: to.Ptr("http://contoso.net/cacert.pem"), - // EncryptLdapConnection: to.Ptr(false), - // ExtendedGroups: to.Ptr(true), - // GroupFileURI: to.Ptr("http://contoso.net/group.file"), - // LdapBaseDN: to.Ptr("dc=contosoad,dc=contoso,dc=local"), - // LdapServer: to.Ptr("192.0.2.12"), - // RequireValidCertificate: to.Ptr(false), - // UserFileURI: to.Ptr("http://contoso.net/passwd.file"), - // UsernameDownloaded: to.Ptr(armstoragecache.UsernameDownloadedTypeNo), - // UsernameSource: to.Ptr(armstoragecache.UsernameSourceLDAP), - // }, - // }, - // Health: &armstoragecache.CacheHealth{ - // Conditions: []*armstoragecache.Condition{ - // { - // Message: to.Ptr("Cannot contact DNS server"), - // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2021-04-21T18:25:43.511Z"); return t}()), - // }}, - // State: to.Ptr(armstoragecache.HealthStateTypeTransitioning), - // StatusDescription: to.Ptr("Cache is being created."), - // }, - // MountAddresses: []*string{ - // to.Ptr("192.168.1.1"), - // to.Ptr("192.168.1.2")}, - // NetworkSettings: &armstoragecache.CacheNetworkSettings{ - // DNSSearchDomain: to.Ptr("contoso.com"), - // DNSServers: []*string{ - // to.Ptr("10.1.22.33"), - // to.Ptr("10.1.12.33")}, - // Mtu: to.Ptr[int32](1500), - // NtpServer: to.Ptr("time.contoso.com"), - // }, - // PrimingJobs: []*armstoragecache.PrimingJob{ - // { - // PrimingJobDetails: to.Ptr("Files: Cached=635, Failed=0, Excluded=80, Data=346030 bytes, Directories: Cached=1003, Failed=0, Excluded=0"), - // PrimingJobID: to.Ptr("00000000000_0000000000"), - // PrimingJobName: to.Ptr("contosoJob1"), - // PrimingJobPercentComplete: to.Ptr[float64](100), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateComplete), - // PrimingJobStatus: to.Ptr("success"), - // }, - // { - // PrimingJobDetails: to.Ptr(""), - // PrimingJobID: to.Ptr("11111111111_1111111111"), - // PrimingJobName: to.Ptr("contosoJob2"), - // PrimingJobPercentComplete: to.Ptr[float64](0), - // PrimingJobState: to.Ptr(armstoragecache.PrimingJobStateQueued), - // PrimingJobStatus: to.Ptr(""), - // }}, - // ProvisioningState: to.Ptr(armstoragecache.ProvisioningStateTypeSucceeded), - // SecuritySettings: &armstoragecache.CacheSecuritySettings{ - // AccessPolicies: []*armstoragecache.NfsAccessPolicy{ - // { - // Name: to.Ptr("default"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(false), - // }}, - // }, - // { - // Name: to.Ptr("restrictive"), - // AccessRules: []*armstoragecache.NfsAccessRule{ - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // Filter: to.Ptr("10.99.3.145"), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeHost), - // SubmountAccess: to.Ptr(true), - // Suid: 
to.Ptr(true), - // }, - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessRw), - // Filter: to.Ptr("10.99.1.0/24"), - // RootSquash: to.Ptr(false), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeNetwork), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(true), - // }, - // { - // Access: to.Ptr(armstoragecache.NfsAccessRuleAccessNo), - // AnonymousGID: to.Ptr("65534"), - // AnonymousUID: to.Ptr("65534"), - // RootSquash: to.Ptr(true), - // Scope: to.Ptr(armstoragecache.NfsAccessRuleScopeDefault), - // SubmountAccess: to.Ptr(true), - // Suid: to.Ptr(false), - // }}, - // }}, - // }, - // SpaceAllocation: []*armstoragecache.StorageTargetSpaceAllocation{ - // }, - // Subnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1"), - // UpgradeSettings: &armstoragecache.CacheUpgradeSettings{ - // ScheduledTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-04-26T18:25:43.511Z"); return t}()), - // UpgradeScheduleEnabled: to.Ptr(true), - // }, - // UpgradeStatus: &armstoragecache.CacheUpgradeStatus{ - // CurrentFirmwareVersion: to.Ptr("2022.08.1"), - // FirmwareUpdateDeadline: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // FirmwareUpdateStatus: to.Ptr(armstoragecache.FirmwareStatusTypeAvailable), - // LastFirmwareUpdate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-01-21T18:25:43.511Z"); return t}()), - // PendingFirmwareVersion: to.Ptr("2022.08.1"), - // }, - // }, - // SKU: &armstoragecache.CacheSKU{ - // Name: to.Ptr("Standard_2G"), - // }, - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Tags: map[string]*string{ - // "Dept": to.Ptr("Contoso"), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_DebugInfo.json -func ExampleCachesClient_BeginDebugInfo() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginDebugInfo(ctx, "scgroup", "sc", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_Flush.json -func ExampleCachesClient_BeginFlush() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a 
credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginFlush(ctx, "scgroup", "sc", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_Start.json -func ExampleCachesClient_BeginStart() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginStart(ctx, "scgroup", "sc", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_Stop.json -func ExampleCachesClient_BeginStop() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginStop(ctx, "scgroup", "sc", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StartPrimingJob.json -func ExampleCachesClient_BeginStartPrimingJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginStartPrimingJob(ctx, "scgroup", "sc1", &armstoragecache.CachesClientBeginStartPrimingJobOptions{Primingjob: &armstoragecache.PrimingJob{ - PrimingJobName: to.Ptr("contosoJob"), - PrimingManifestURL: to.Ptr("https://contosostorage.blob.core.windows.net/contosoblob/00000000_00000000000000000000000000000000.00000000000.FFFFFFFF.00000000?sp=r&st=2021-08-11T19:33:35Z&se=2021-08-12T03:33:35Z&spr=https&sv=2020-08-04&sr=b&sig="), - }, - }) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example 
definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StopPrimingJob.json -func ExampleCachesClient_BeginStopPrimingJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginStopPrimingJob(ctx, "scgroup", "sc1", &armstoragecache.CachesClientBeginStopPrimingJobOptions{PrimingJobID: &armstoragecache.PrimingJobIDParameter{ - PrimingJobID: to.Ptr("00000000000_0000000000"), - }, - }) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/PausePrimingJob.json -func ExampleCachesClient_BeginPausePrimingJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginPausePrimingJob(ctx, "scgroup", "sc1", &armstoragecache.CachesClientBeginPausePrimingJobOptions{PrimingJobID: &armstoragecache.PrimingJobIDParameter{ - PrimingJobID: to.Ptr("00000000000_0000000000"), - }, - }) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/ResumePrimingJob.json -func ExampleCachesClient_BeginResumePrimingJob() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginResumePrimingJob(ctx, "scgroup", "sc1", &armstoragecache.CachesClientBeginResumePrimingJobOptions{PrimingJobID: &armstoragecache.PrimingJobIDParameter{ - PrimingJobID: to.Ptr("00000000000_0000000000"), - }, - }) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Caches_UpgradeFirmware.json -func ExampleCachesClient_BeginUpgradeFirmware() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - 
log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginUpgradeFirmware(ctx, "scgroup", "sc1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/SpaceAllocation_Post.json -func ExampleCachesClient_BeginSpaceAllocation() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewCachesClient().BeginSpaceAllocation(ctx, "scgroup", "sc1", &armstoragecache.CachesClientBeginSpaceAllocationOptions{SpaceAllocation: []*armstoragecache.StorageTargetSpaceAllocation{ - { - Name: to.Ptr("st1"), - AllocationPercentage: to.Ptr[int32](25), - }, - { - Name: to.Ptr("st2"), - AllocationPercentage: to.Ptr[int32](50), - }, - { - Name: to.Ptr("st3"), - AllocationPercentage: to.Ptr[int32](25), - }}, - }) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/client_factory.go b/sdk/resourcemanager/storagecache/armstoragecache/client_factory.go index 8c326c51213d..469dc603d4ef 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/client_factory.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/client_factory.go @@ -60,6 +60,14 @@ func (c *ClientFactory) NewAscUsagesClient() *AscUsagesClient { } } +// NewAutoExportJobsClient creates a new instance of AutoExportJobsClient. +func (c *ClientFactory) NewAutoExportJobsClient() *AutoExportJobsClient { + return &AutoExportJobsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } +} + // NewCachesClient creates a new instance of CachesClient. func (c *ClientFactory) NewCachesClient() *CachesClient { return &CachesClient{ diff --git a/sdk/resourcemanager/storagecache/armstoragecache/constants.go b/sdk/resourcemanager/storagecache/armstoragecache/constants.go index c7e4453bdd10..8e906d534b94 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/constants.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/constants.go @@ -10,7 +10,7 @@ package armstoragecache const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache" - moduleVersion = "v4.0.0" + moduleVersion = "v4.1.0" ) // AmlFilesystemHealthStateType - List of AML file system health states. @@ -124,6 +124,73 @@ func PossibleArchiveStatusTypeValues() []ArchiveStatusType { } } +// AutoExportJobAdminStatus - The administrative status of the auto export job. Possible values: 'Enable', 'Disable'. Passing +// in a value of 'Disable' will disable the current active auto export job. By default it is set to +// 'Enable'. 
+type AutoExportJobAdminStatus string + +const ( + AutoExportJobAdminStatusActive AutoExportJobAdminStatus = "Active" + AutoExportJobAdminStatusCancel AutoExportJobAdminStatus = "Cancel" +) + +// PossibleAutoExportJobAdminStatusValues returns the possible values for the AutoExportJobAdminStatus const type. +func PossibleAutoExportJobAdminStatusValues() []AutoExportJobAdminStatus { + return []AutoExportJobAdminStatus{ + AutoExportJobAdminStatusActive, + AutoExportJobAdminStatusCancel, + } +} + +// AutoExportJobProvisioningStateType - ARM provisioning state. +type AutoExportJobProvisioningStateType string + +const ( + AutoExportJobProvisioningStateTypeCanceled AutoExportJobProvisioningStateType = "Canceled" + AutoExportJobProvisioningStateTypeCreating AutoExportJobProvisioningStateType = "Creating" + AutoExportJobProvisioningStateTypeDeleting AutoExportJobProvisioningStateType = "Deleting" + AutoExportJobProvisioningStateTypeFailed AutoExportJobProvisioningStateType = "Failed" + AutoExportJobProvisioningStateTypeSucceeded AutoExportJobProvisioningStateType = "Succeeded" + AutoExportJobProvisioningStateTypeUpdating AutoExportJobProvisioningStateType = "Updating" +) + +// PossibleAutoExportJobProvisioningStateTypeValues returns the possible values for the AutoExportJobProvisioningStateType const type. +func PossibleAutoExportJobProvisioningStateTypeValues() []AutoExportJobProvisioningStateType { + return []AutoExportJobProvisioningStateType{ + AutoExportJobProvisioningStateTypeCanceled, + AutoExportJobProvisioningStateTypeCreating, + AutoExportJobProvisioningStateTypeDeleting, + AutoExportJobProvisioningStateTypeFailed, + AutoExportJobProvisioningStateTypeSucceeded, + AutoExportJobProvisioningStateTypeUpdating, + } +} + +// AutoExportStatusType - The operational state of auto export. InProgress indicates the export is running. Disabling indicates +// the user has requested to disable the export but the disabling is still in progress. Disabled +// indicates auto export has been disabled. DisableFailed indicates the disabling has failed. Failed means the export was +// unable to continue, due to a fatal error. +type AutoExportStatusType string + +const ( + AutoExportStatusTypeDisableFailed AutoExportStatusType = "DisableFailed" + AutoExportStatusTypeDisabled AutoExportStatusType = "Disabled" + AutoExportStatusTypeDisabling AutoExportStatusType = "Disabling" + AutoExportStatusTypeFailed AutoExportStatusType = "Failed" + AutoExportStatusTypeInProgress AutoExportStatusType = "InProgress" +) + +// PossibleAutoExportStatusTypeValues returns the possible values for the AutoExportStatusType const type. +func PossibleAutoExportStatusTypeValues() []AutoExportStatusType { + return []AutoExportStatusType{ + AutoExportStatusTypeDisableFailed, + AutoExportStatusTypeDisabled, + AutoExportStatusTypeDisabling, + AutoExportStatusTypeFailed, + AutoExportStatusTypeInProgress, + } +} + // CacheIdentityType - The type of identity used for the cache type CacheIdentityType string @@ -265,6 +332,23 @@ func PossibleHealthStateTypeValues() []HealthStateType { } } +// ImportJobAdminStatus - The administrative status of the import job. Possible values: 'Enable', 'Disable'. Passing in a +// value of 'Disable' will cancel the current active import job. By default it is set to 'Enable'. 
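// An illustrative sketch, not part of the generated diff: the
// PossibleAutoExportJobAdminStatusValues helper just above can back simple
// client-side validation before a request is built. Note that the generated
// constants are AutoExportJobAdminStatusActive and AutoExportJobAdminStatusCancel;
// the validAutoExportJobAdminStatus helper below is hypothetical and not part
// of this package.
func validAutoExportJobAdminStatus(s AutoExportJobAdminStatus) bool {
	for _, v := range PossibleAutoExportJobAdminStatusValues() {
		if s == v {
			return true
		}
	}
	return false
}

// validAutoExportJobAdminStatus(AutoExportJobAdminStatusCancel)      // true
// validAutoExportJobAdminStatus(AutoExportJobAdminStatus("Disable")) // false: the wire value is "Cancel"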
+type ImportJobAdminStatus string + +const ( + ImportJobAdminStatusActive ImportJobAdminStatus = "Active" + ImportJobAdminStatusCancel ImportJobAdminStatus = "Cancel" +) + +// PossibleImportJobAdminStatusValues returns the possible values for the ImportJobAdminStatus const type. +func PossibleImportJobAdminStatusValues() []ImportJobAdminStatus { + return []ImportJobAdminStatus{ + ImportJobAdminStatusActive, + ImportJobAdminStatusCancel, + } +} + // ImportJobProvisioningStateType - ARM provisioning state. type ImportJobProvisioningStateType string @@ -289,11 +373,11 @@ func PossibleImportJobProvisioningStateTypeValues() []ImportJobProvisioningState } } -// ImportStatusType - The state of the import job. InProgress indicates the import is still running. Canceled indicates it -// has been canceled by the user. Completed indicates import finished, successfully importing all -// discovered blobs into the Lustre namespace. CompletedPartial indicates the import finished but some blobs either were found -// to be conflicting and could not be imported or other errors were -// encountered. Failed means the import was unable to complete due to a fatal error. +// ImportStatusType - The operational state of the import job. InProgress indicates the import is still running. Canceled +// indicates it has been canceled by the user. Completed indicates import finished, successfully +// importing all discovered blobs into the Lustre namespace. CompletedPartial indicates the import finished but some blobs +// either were found to be conflicting and could not be imported or other errors +// were encountered. Failed means the import was unable to complete due to a fatal error. type ImportStatusType string const ( diff --git a/sdk/resourcemanager/storagecache/armstoragecache/fake/autoexportjobs_server.go b/sdk/resourcemanager/storagecache/armstoragecache/fake/autoexportjobs_server.go new file mode 100644 index 000000000000..e4849bd36057 --- /dev/null +++ b/sdk/resourcemanager/storagecache/armstoragecache/fake/autoexportjobs_server.go @@ -0,0 +1,332 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "context" + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" + "net/http" + "net/url" + "regexp" +) + +// AutoExportJobsServer is a fake server for instances of the armstoragecache.AutoExportJobsClient type. 
+type AutoExportJobsServer struct { + // BeginCreateOrUpdate is the fake for method AutoExportJobsClient.BeginCreateOrUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusCreated + BeginCreateOrUpdate func(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, autoExportJob armstoragecache.AutoExportJob, options *armstoragecache.AutoExportJobsClientBeginCreateOrUpdateOptions) (resp azfake.PollerResponder[armstoragecache.AutoExportJobsClientCreateOrUpdateResponse], errResp azfake.ErrorResponder) + + // BeginDelete is the fake for method AutoExportJobsClient.BeginDelete + // HTTP status codes to indicate success: http.StatusAccepted, http.StatusNoContent + BeginDelete func(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, options *armstoragecache.AutoExportJobsClientBeginDeleteOptions) (resp azfake.PollerResponder[armstoragecache.AutoExportJobsClientDeleteResponse], errResp azfake.ErrorResponder) + + // Get is the fake for method AutoExportJobsClient.Get + // HTTP status codes to indicate success: http.StatusOK + Get func(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, options *armstoragecache.AutoExportJobsClientGetOptions) (resp azfake.Responder[armstoragecache.AutoExportJobsClientGetResponse], errResp azfake.ErrorResponder) + + // NewListByAmlFilesystemPager is the fake for method AutoExportJobsClient.NewListByAmlFilesystemPager + // HTTP status codes to indicate success: http.StatusOK + NewListByAmlFilesystemPager func(resourceGroupName string, amlFilesystemName string, options *armstoragecache.AutoExportJobsClientListByAmlFilesystemOptions) (resp azfake.PagerResponder[armstoragecache.AutoExportJobsClientListByAmlFilesystemResponse]) + + // BeginUpdate is the fake for method AutoExportJobsClient.BeginUpdate + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpdate func(ctx context.Context, resourceGroupName string, amlFilesystemName string, autoExportJobName string, autoExportJob armstoragecache.AutoExportJobUpdate, options *armstoragecache.AutoExportJobsClientBeginUpdateOptions) (resp azfake.PollerResponder[armstoragecache.AutoExportJobsClientUpdateResponse], errResp azfake.ErrorResponder) +} + +// NewAutoExportJobsServerTransport creates a new instance of AutoExportJobsServerTransport with the provided implementation. +// The returned AutoExportJobsServerTransport instance is connected to an instance of armstoragecache.AutoExportJobsClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewAutoExportJobsServerTransport(srv *AutoExportJobsServer) *AutoExportJobsServerTransport { + return &AutoExportJobsServerTransport{ + srv: srv, + beginCreateOrUpdate: newTracker[azfake.PollerResponder[armstoragecache.AutoExportJobsClientCreateOrUpdateResponse]](), + beginDelete: newTracker[azfake.PollerResponder[armstoragecache.AutoExportJobsClientDeleteResponse]](), + newListByAmlFilesystemPager: newTracker[azfake.PagerResponder[armstoragecache.AutoExportJobsClientListByAmlFilesystemResponse]](), + beginUpdate: newTracker[azfake.PollerResponder[armstoragecache.AutoExportJobsClientUpdateResponse]](), + } +} + +// AutoExportJobsServerTransport connects instances of armstoragecache.AutoExportJobsClient to instances of AutoExportJobsServer. +// Don't use this type directly, use NewAutoExportJobsServerTransport instead. 
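// An illustrative, test-style sketch, not part of the generated file: it shows
// the wiring described in the comment above, i.e. plugging the transport
// returned by NewAutoExportJobsServerTransport into a real client through the
// azcore client options. Assumptions: the snippet lives in an external test
// package, only Get is faked, and the empty AutoExportJob payload stands in
// for real response fields.

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4/fake"
)

func exampleFakeAutoExportJobsGet() error {
	srv := fake.AutoExportJobsServer{
		// Get returns a canned 200 response instead of calling the real service.
		Get: func(ctx context.Context, resourceGroupName, amlFilesystemName, autoExportJobName string, options *armstoragecache.AutoExportJobsClientGetOptions) (resp azfake.Responder[armstoragecache.AutoExportJobsClientGetResponse], errResp azfake.ErrorResponder) {
			resp.SetResponse(http.StatusOK, armstoragecache.AutoExportJobsClientGetResponse{AutoExportJob: armstoragecache.AutoExportJob{}}, nil)
			return
		},
	}
	clientFactory, err := armstoragecache.NewClientFactory("00000000-0000-0000-0000-000000000000", &azfake.TokenCredential{}, &arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{
			// Route every request from this client factory to the fake server.
			Transport: fake.NewAutoExportJobsServerTransport(&srv),
		},
	})
	if err != nil {
		return err
	}
	_, err = clientFactory.NewAutoExportJobsClient().Get(context.Background(), "scgroup", "fs1", "job1", nil)
	return err
}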
+type AutoExportJobsServerTransport struct { + srv *AutoExportJobsServer + beginCreateOrUpdate *tracker[azfake.PollerResponder[armstoragecache.AutoExportJobsClientCreateOrUpdateResponse]] + beginDelete *tracker[azfake.PollerResponder[armstoragecache.AutoExportJobsClientDeleteResponse]] + newListByAmlFilesystemPager *tracker[azfake.PagerResponder[armstoragecache.AutoExportJobsClientListByAmlFilesystemResponse]] + beginUpdate *tracker[azfake.PollerResponder[armstoragecache.AutoExportJobsClientUpdateResponse]] +} + +// Do implements the policy.Transporter interface for AutoExportJobsServerTransport. +func (a *AutoExportJobsServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "AutoExportJobsClient.BeginCreateOrUpdate": + resp, err = a.dispatchBeginCreateOrUpdate(req) + case "AutoExportJobsClient.BeginDelete": + resp, err = a.dispatchBeginDelete(req) + case "AutoExportJobsClient.Get": + resp, err = a.dispatchGet(req) + case "AutoExportJobsClient.NewListByAmlFilesystemPager": + resp, err = a.dispatchNewListByAmlFilesystemPager(req) + case "AutoExportJobsClient.BeginUpdate": + resp, err = a.dispatchBeginUpdate(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (a *AutoExportJobsServerTransport) dispatchBeginCreateOrUpdate(req *http.Request) (*http.Response, error) { + if a.srv.BeginCreateOrUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginCreateOrUpdate not implemented")} + } + beginCreateOrUpdate := a.beginCreateOrUpdate.get(req) + if beginCreateOrUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.StorageCache/amlFilesystems/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/autoExportJobs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armstoragecache.AutoExportJob](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + amlFilesystemNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("amlFilesystemName")]) + if err != nil { + return nil, err + } + autoExportJobNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("autoExportJobName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginCreateOrUpdate(req.Context(), resourceGroupNameParam, amlFilesystemNameParam, autoExportJobNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginCreateOrUpdate = &respr + a.beginCreateOrUpdate.add(req, beginCreateOrUpdate) + } + + resp, err := server.PollerResponderNext(beginCreateOrUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusCreated}, resp.StatusCode) { + a.beginCreateOrUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusCreated", resp.StatusCode)} + } + if !server.PollerResponderMore(beginCreateOrUpdate) { + a.beginCreateOrUpdate.remove(req) + } + + return resp, nil +} + +func (a *AutoExportJobsServerTransport) dispatchBeginDelete(req *http.Request) (*http.Response, error) { + if a.srv.BeginDelete == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginDelete not implemented")} + } + beginDelete := a.beginDelete.get(req) + if beginDelete == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.StorageCache/amlFilesystems/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/autoExportJobs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + amlFilesystemNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("amlFilesystemName")]) + if err != nil { + return nil, err + } + autoExportJobNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("autoExportJobName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginDelete(req.Context(), resourceGroupNameParam, amlFilesystemNameParam, autoExportJobNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginDelete = &respr + a.beginDelete.add(req, beginDelete) + } + + resp, err := server.PollerResponderNext(beginDelete, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusAccepted, http.StatusNoContent}, resp.StatusCode) { + a.beginDelete.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusAccepted, http.StatusNoContent", resp.StatusCode)} + } + if !server.PollerResponderMore(beginDelete) { + a.beginDelete.remove(req) + } + + return resp, nil +} + +func (a *AutoExportJobsServerTransport) dispatchGet(req *http.Request) (*http.Response, error) { + if a.srv.Get == nil { + return nil, &nonRetriableError{errors.New("fake for method Get not implemented")} + } + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.StorageCache/amlFilesystems/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/autoExportJobs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + amlFilesystemNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("amlFilesystemName")]) + if err != nil { + return nil, err + } + autoExportJobNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("autoExportJobName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.Get(req.Context(), resourceGroupNameParam, amlFilesystemNameParam, autoExportJobNameParam, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + respContent := server.GetResponseContent(respr) + if !contains([]int{http.StatusOK}, respContent.HTTPStatus) { + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", respContent.HTTPStatus)} + } + resp, err := server.MarshalResponseAsJSON(respContent, server.GetResponse(respr).AutoExportJob, req) + if err != nil { + return nil, err + } + return resp, nil +} + +func (a *AutoExportJobsServerTransport) dispatchNewListByAmlFilesystemPager(req *http.Request) (*http.Response, error) { + if a.srv.NewListByAmlFilesystemPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListByAmlFilesystemPager not implemented")} + } + newListByAmlFilesystemPager := a.newListByAmlFilesystemPager.get(req) + if newListByAmlFilesystemPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.StorageCache/amlFilesystems/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/autoExportJobs` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + amlFilesystemNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("amlFilesystemName")]) + if err != nil { + return nil, err + } + resp := a.srv.NewListByAmlFilesystemPager(resourceGroupNameParam, amlFilesystemNameParam, nil) + newListByAmlFilesystemPager = &resp + a.newListByAmlFilesystemPager.add(req, newListByAmlFilesystemPager) + server.PagerResponderInjectNextLinks(newListByAmlFilesystemPager, req, func(page *armstoragecache.AutoExportJobsClientListByAmlFilesystemResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListByAmlFilesystemPager, req) + if err != nil { + return 
nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + a.newListByAmlFilesystemPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListByAmlFilesystemPager) { + a.newListByAmlFilesystemPager.remove(req) + } + return resp, nil +} + +func (a *AutoExportJobsServerTransport) dispatchBeginUpdate(req *http.Request) (*http.Response, error) { + if a.srv.BeginUpdate == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpdate not implemented")} + } + beginUpdate := a.beginUpdate.get(req) + if beginUpdate == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.StorageCache/amlFilesystems/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/autoExportJobs/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armstoragecache.AutoExportJobUpdate](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + amlFilesystemNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("amlFilesystemName")]) + if err != nil { + return nil, err + } + autoExportJobNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("autoExportJobName")]) + if err != nil { + return nil, err + } + respr, errRespr := a.srv.BeginUpdate(req.Context(), resourceGroupNameParam, amlFilesystemNameParam, autoExportJobNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpdate = &respr + a.beginUpdate.add(req, beginUpdate) + } + + resp, err := server.PollerResponderNext(beginUpdate, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + a.beginUpdate.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpdate) { + a.beginUpdate.remove(req) + } + + return resp, nil +} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/fake/server_factory.go b/sdk/resourcemanager/storagecache/armstoragecache/fake/server_factory.go index e442d72c7f9a..d7c79fe57b3e 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/fake/server_factory.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/fake/server_factory.go @@ -22,6 +22,7 @@ type ServerFactory struct { AmlFilesystemsServer AmlFilesystemsServer AscOperationsServer AscOperationsServer AscUsagesServer AscUsagesServer + AutoExportJobsServer AutoExportJobsServer CachesServer CachesServer ImportJobsServer ImportJobsServer ManagementServer ManagementServer @@ -49,6 +50,7 @@ type ServerFactoryTransport struct { trAmlFilesystemsServer *AmlFilesystemsServerTransport trAscOperationsServer *AscOperationsServerTransport trAscUsagesServer *AscUsagesServerTransport + trAutoExportJobsServer *AutoExportJobsServerTransport trCachesServer *CachesServerTransport trImportJobsServer *ImportJobsServerTransport trManagementServer *ManagementServerTransport @@ -85,6 +87,11 @@ func (s *ServerFactoryTransport) Do(req *http.Request) (*http.Response, error) { case "AscUsagesClient": initServer(s, &s.trAscUsagesServer, func() *AscUsagesServerTransport { return NewAscUsagesServerTransport(&s.srv.AscUsagesServer) }) resp, err = s.trAscUsagesServer.Do(req) + case "AutoExportJobsClient": + initServer(s, &s.trAutoExportJobsServer, func() *AutoExportJobsServerTransport { + return NewAutoExportJobsServerTransport(&s.srv.AutoExportJobsServer) + }) + resp, err = s.trAutoExportJobsServer.Do(req) case "CachesClient": initServer(s, &s.trCachesServer, func() *CachesServerTransport { return NewCachesServerTransport(&s.srv.CachesServer) }) resp, err = s.trCachesServer.Do(req) diff --git a/sdk/resourcemanager/storagecache/armstoragecache/go.mod b/sdk/resourcemanager/storagecache/armstoragecache/go.mod index c86640a8b7d3..699ffd790f52 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/go.mod +++ b/sdk/resourcemanager/storagecache/armstoragecache/go.mod @@ -4,7 +4,6 @@ go 1.18 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 @@ -12,6 +11,7 @@ require ( ) require ( + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect diff --git a/sdk/resourcemanager/storagecache/armstoragecache/importjobs_client.go b/sdk/resourcemanager/storagecache/armstoragecache/importjobs_client.go index 95aa06fedee6..02b75c7b55eb 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/importjobs_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/importjobs_client.go @@ -43,10 +43,10 @@ func NewImportJobsClient(subscriptionID string, credential azcore.TokenCredentia return client, nil } -// BeginCreateOrUpdate - Create or update an import job. Import jobs are automatically deleted 72 hours after completion. +// BeginCreateOrUpdate - Create or update an import job. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - importJobName - Name for the import job. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. @@ -72,10 +72,10 @@ func (client *ImportJobsClient) BeginCreateOrUpdate(ctx context.Context, resourc } } -// CreateOrUpdate - Create or update an import job. Import jobs are automatically deleted 72 hours after completion. +// CreateOrUpdate - Create or update an import job. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *ImportJobsClient) createOrUpdate(ctx context.Context, resourceGroupName string, amlFilesystemName string, importJobName string, importJob ImportJob, options *ImportJobsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "ImportJobsClient.BeginCreateOrUpdate" @@ -121,7 +121,7 @@ func (client *ImportJobsClient) createOrUpdateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, importJob); err != nil { @@ -133,7 +133,7 @@ func (client *ImportJobsClient) createOrUpdateCreateRequest(ctx context.Context, // BeginDelete - Schedules an import job for deletion. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - importJobName - Name for the import job. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. @@ -159,7 +159,7 @@ func (client *ImportJobsClient) BeginDelete(ctx context.Context, resourceGroupNa // Delete - Schedules an import job for deletion. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *ImportJobsClient) deleteOperation(ctx context.Context, resourceGroupName string, amlFilesystemName string, importJobName string, options *ImportJobsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "ImportJobsClient.BeginDelete" @@ -205,7 +205,7 @@ func (client *ImportJobsClient) deleteCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -214,7 +214,7 @@ func (client *ImportJobsClient) deleteCreateRequest(ctx context.Context, resourc // Get - Returns an import job. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - importJobName - Name for the import job. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. @@ -265,7 +265,7 @@ func (client *ImportJobsClient) getCreateRequest(ctx context.Context, resourceGr return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -282,7 +282,7 @@ func (client *ImportJobsClient) getHandleResponse(resp *http.Response) (ImportJo // NewListByAmlFilesystemPager - Returns all import jobs the user has access to under an AML File System. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - options - ImportJobsClientListByAmlFilesystemOptions contains the optional parameters for the ImportJobsClient.NewListByAmlFilesystemPager @@ -330,7 +330,7 @@ func (client *ImportJobsClient) listByAmlFilesystemCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -348,7 +348,7 @@ func (client *ImportJobsClient) listByAmlFilesystemHandleResponse(resp *http.Res // BeginUpdate - Update an import job instance. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - amlFilesystemName - Name for the AML file system. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. // - importJobName - Name for the import job. Allows alphanumerics, underscores, and hyphens. Start and end with alphanumeric. @@ -376,7 +376,7 @@ func (client *ImportJobsClient) BeginUpdate(ctx context.Context, resourceGroupNa // Update - Update an import job instance. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *ImportJobsClient) update(ctx context.Context, resourceGroupName string, amlFilesystemName string, importJobName string, importJob ImportJobUpdate, options *ImportJobsClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "ImportJobsClient.BeginUpdate" @@ -422,7 +422,7 @@ func (client *ImportJobsClient) updateCreateRequest(ctx context.Context, resourc return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, importJob); err != nil { diff --git a/sdk/resourcemanager/storagecache/armstoragecache/importjobs_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/importjobs_client_example_test.go deleted file mode 100644 index 22d8b0385afb..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/importjobs_client_example_test.go +++ /dev/null @@ -1,282 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armstoragecache_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/importJobs_Delete.json -func ExampleImportJobsClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewImportJobsClient().BeginDelete(ctx, "scgroup", "fs1", "job1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/importJobs_Get.json -func ExampleImportJobsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewImportJobsClient().Get(ctx, "scgroup", "fs1", "job1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. 
- _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.ImportJob = armstoragecache.ImportJob{ - // Name: to.Ptr("job1"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem/importJob"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlfilesystems/fs1/storagetargets/job1"), - // Location: to.Ptr("eastus"), - // Properties: &armstoragecache.ImportJobProperties{ - // ConflictResolutionMode: to.Ptr(armstoragecache.ConflictResolutionModeOverwriteAlways), - // ImportPrefixes: []*string{ - // to.Ptr("/")}, - // MaximumErrors: to.Ptr[int32](0), - // ProvisioningState: to.Ptr(armstoragecache.ImportJobProvisioningStateTypeSucceeded), - // Status: &armstoragecache.ImportJobPropertiesStatus{ - // BlobsImportedPerSecond: to.Ptr[int64](4000), - // BlobsWalkedPerSecond: to.Ptr[int64](10000), - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ImportStatusTypeCompleted), - // StatusMessage: to.Ptr("Import job completed successfully"), - // TotalBlobsImported: to.Ptr[int64](1000000), - // TotalBlobsWalked: to.Ptr[int64](1000000), - // TotalConflicts: to.Ptr[int32](1), - // TotalErrors: to.Ptr[int32](1), - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/importJobs_CreateOrUpdate.json -func ExampleImportJobsClient_BeginCreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewImportJobsClient().BeginCreateOrUpdate(ctx, "scgroup", "fs1", "job1", armstoragecache.ImportJob{ - Location: to.Ptr("eastus"), - Tags: map[string]*string{ - "Dept": to.Ptr("ContosoAds"), - }, - Properties: &armstoragecache.ImportJobProperties{ - ConflictResolutionMode: to.Ptr(armstoragecache.ConflictResolutionModeOverwriteAlways), - ImportPrefixes: []*string{ - to.Ptr("/")}, - MaximumErrors: to.Ptr[int32](0), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ImportJob = armstoragecache.ImportJob{ - // Name: to.Ptr("job1"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem/importJob"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlfilesystems/fs1/storagetargets/job1"), - // Location: to.Ptr("eastus"), - // Tags: map[string]*string{ - // "Dept": to.Ptr("ContosoAds"), - // }, - // Properties: &armstoragecache.ImportJobProperties{ - // ConflictResolutionMode: to.Ptr(armstoragecache.ConflictResolutionModeOverwriteAlways), - // ImportPrefixes: []*string{ - // to.Ptr("/")}, - // MaximumErrors: to.Ptr[int32](0), - // ProvisioningState: to.Ptr(armstoragecache.ImportJobProvisioningStateTypeSucceeded), - // Status: &armstoragecache.ImportJobPropertiesStatus{ - // BlobsImportedPerSecond: to.Ptr[int64](4000), - // BlobsWalkedPerSecond: to.Ptr[int64](10000), - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ImportStatusTypeCompleted), - // StatusMessage: to.Ptr("Import job completed successfully"), - // TotalBlobsImported: to.Ptr[int64](1000000), - // TotalBlobsWalked: to.Ptr[int64](1000000), - // TotalConflicts: to.Ptr[int32](1), - // TotalErrors: to.Ptr[int32](1), - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/importJob_Update.json -func ExampleImportJobsClient_BeginUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewImportJobsClient().BeginUpdate(ctx, "scgroup", "fs1", "job1", armstoragecache.ImportJobUpdate{ - Tags: map[string]*string{ - "Dept": to.Ptr("ContosoAds"), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.ImportJob = armstoragecache.ImportJob{ - // Name: to.Ptr("job1"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem/importJob"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlfilesystems/fs1/storagetargets/job1"), - // Location: to.Ptr("eastus"), - // Tags: map[string]*string{ - // "Dept": to.Ptr("ContosoAds"), - // }, - // Properties: &armstoragecache.ImportJobProperties{ - // ConflictResolutionMode: to.Ptr(armstoragecache.ConflictResolutionModeOverwriteAlways), - // ImportPrefixes: []*string{ - // to.Ptr("/")}, - // MaximumErrors: to.Ptr[int32](0), - // ProvisioningState: to.Ptr(armstoragecache.ImportJobProvisioningStateTypeSucceeded), - // Status: &armstoragecache.ImportJobPropertiesStatus{ - // BlobsImportedPerSecond: to.Ptr[int64](4000), - // BlobsWalkedPerSecond: to.Ptr[int64](10000), - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ImportStatusTypeCompleted), - // StatusMessage: to.Ptr("Import job completed successfully"), - // TotalBlobsImported: to.Ptr[int64](1000000), - // TotalBlobsWalked: to.Ptr[int64](1000000), - // TotalConflicts: to.Ptr[int32](1), - // TotalErrors: to.Ptr[int32](1), - // }, - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/importJobs_ListByAmlFilesystem.json -func ExampleImportJobsClient_NewListByAmlFilesystemPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewImportJobsClient().NewListByAmlFilesystemPager("scgroup", "fs1", nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.ImportJobsListResult = armstoragecache.ImportJobsListResult{ - // Value: []*armstoragecache.ImportJob{ - // { - // Name: to.Ptr("job1"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem/importJob"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlfilesystems/fs1/storagetargets/job1"), - // Location: to.Ptr("eastus"), - // Properties: &armstoragecache.ImportJobProperties{ - // ConflictResolutionMode: to.Ptr(armstoragecache.ConflictResolutionModeOverwriteAlways), - // ImportPrefixes: []*string{ - // to.Ptr("/")}, - // MaximumErrors: to.Ptr[int32](0), - // ProvisioningState: to.Ptr(armstoragecache.ImportJobProvisioningStateTypeSucceeded), - // Status: &armstoragecache.ImportJobPropertiesStatus{ - // BlobsImportedPerSecond: to.Ptr[int64](4000), - // BlobsWalkedPerSecond: to.Ptr[int64](10000), - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ImportStatusTypeCompleted), - // StatusMessage: to.Ptr("Import job completed successfully"), - // TotalBlobsImported: to.Ptr[int64](1000000), - // TotalBlobsWalked: to.Ptr[int64](1000000), - // TotalConflicts: to.Ptr[int32](1), - // TotalErrors: to.Ptr[int32](1), - // }, - // }, - // }, - // { - // Name: to.Ptr("job2"), - // Type: to.Ptr("Microsoft.StorageCache/amlFilesystem/importJob"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/amlfilesystems/fs1/storagetargets/job2"), - // Location: to.Ptr("eastus"), - // Properties: &armstoragecache.ImportJobProperties{ - // ConflictResolutionMode: to.Ptr(armstoragecache.ConflictResolutionModeSkip), - // ImportPrefixes: []*string{ - // to.Ptr("/dir1")}, - // MaximumErrors: to.Ptr[int32](0), - // ProvisioningState: to.Ptr(armstoragecache.ImportJobProvisioningStateTypeSucceeded), - // Status: &armstoragecache.ImportJobPropertiesStatus{ - // BlobsImportedPerSecond: to.Ptr[int64](4000), - // BlobsWalkedPerSecond: to.Ptr[int64](10000), - // LastCompletionTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T18:25:43.511Z"); return t}()), - // LastStartedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2019-04-21T17:25:43.511Z"); return t}()), - // State: to.Ptr(armstoragecache.ImportStatusTypeCompleted), - // StatusMessage: to.Ptr("Import job completed successfully"), - // TotalBlobsImported: to.Ptr[int64](1000000), - // TotalBlobsWalked: to.Ptr[int64](1000000), - // TotalConflicts: to.Ptr[int32](1), - // TotalErrors: to.Ptr[int32](1), - // }, - // }, - // }}, - // } - } -} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/management_client.go b/sdk/resourcemanager/storagecache/armstoragecache/management_client.go index dbf12a7bbcae..c79f54fb9eff 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/management_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/management_client.go @@ -46,7 +46,7 @@ func NewManagementClient(subscriptionID string, credential azcore.TokenCredentia // CheckAmlFSSubnets - Check that subnets will be valid for AML file system create calls. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - options - ManagementClientCheckAmlFSSubnetsOptions contains the optional parameters for the ManagementClient.CheckAmlFSSubnets // method. func (client *ManagementClient) CheckAmlFSSubnets(ctx context.Context, options *ManagementClientCheckAmlFSSubnetsOptions) (ManagementClientCheckAmlFSSubnetsResponse, error) { @@ -82,7 +82,7 @@ func (client *ManagementClient) checkAmlFSSubnetsCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.AmlFilesystemSubnetInfo != nil { @@ -97,7 +97,7 @@ func (client *ManagementClient) checkAmlFSSubnetsCreateRequest(ctx context.Conte // GetRequiredAmlFSSubnetsSize - Get the number of available IP addresses needed for the AML file system information provided. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - options - ManagementClientGetRequiredAmlFSSubnetsSizeOptions contains the optional parameters for the ManagementClient.GetRequiredAmlFSSubnetsSize // method. func (client *ManagementClient) GetRequiredAmlFSSubnetsSize(ctx context.Context, options *ManagementClientGetRequiredAmlFSSubnetsSizeOptions) (ManagementClientGetRequiredAmlFSSubnetsSizeResponse, error) { @@ -134,7 +134,7 @@ func (client *ManagementClient) getRequiredAmlFSSubnetsSizeCreateRequest(ctx con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if options != nil && options.RequiredAMLFilesystemSubnetsSizeInfo != nil { diff --git a/sdk/resourcemanager/storagecache/armstoragecache/management_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/management_client_example_test.go deleted file mode 100644 index dca4bb3966fe..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/management_client_example_test.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armstoragecache_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/checkAmlFSSubnets.json -func ExampleManagementClient_CheckAmlFSSubnets() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - _, err = clientFactory.NewManagementClient().CheckAmlFSSubnets(ctx, &armstoragecache.ManagementClientCheckAmlFSSubnetsOptions{AmlFilesystemSubnetInfo: &armstoragecache.AmlFilesystemSubnetInfo{ - FilesystemSubnet: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/fsSub"), - SKU: &armstoragecache.SKUName{ - Name: to.Ptr("AMLFS-Durable-Premium-125"), - }, - StorageCapacityTiB: to.Ptr[float32](16), - }, - }) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/getRequiredAmlFSSubnetsSize.json -func ExampleManagementClient_GetRequiredAmlFSSubnetsSize() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewManagementClient().GetRequiredAmlFSSubnetsSize(ctx, &armstoragecache.ManagementClientGetRequiredAmlFSSubnetsSizeOptions{RequiredAMLFilesystemSubnetsSizeInfo: nil}) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // res.RequiredAmlFilesystemSubnetsSize = armstoragecache.RequiredAmlFilesystemSubnetsSize{ - // FilesystemSubnetSize: to.Ptr[int32](24), - // } -} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/models.go b/sdk/resourcemanager/storagecache/armstoragecache/models.go index 251ba680b812..ae1213eaa193 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/models.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/models.go @@ -388,6 +388,117 @@ type AscOperationProperties struct { Output map[string]any } +// AutoExportJob - An auto export job instance. Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md +type AutoExportJob struct { + // REQUIRED; The geo-location where the resource lives + Location *string + + // Properties of the auto export job. 
+ Properties *AutoExportJobProperties + + // Resource tags. + Tags map[string]*string + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// AutoExportJobProperties - Properties of the auto export job. +type AutoExportJobProperties struct { + // The administrative status of the auto export job. Possible values: 'Enable', 'Disable'. Passing in a value of 'Disable' + // will disable the current active auto export job. By default it is set to + // 'Enable'. + AdminStatus *AutoExportJobAdminStatus + + // An array of blob paths/prefixes that get auto exported to the cluster namespace. It has '/' as the default value. Number + // of maximum allowed paths for now is 1. + AutoExportPrefixes []*string + + // READ-ONLY; ARM provisioning state. + ProvisioningState *AutoExportJobProvisioningStateType + + // READ-ONLY; The status of the auto export + Status *AutoExportJobPropertiesStatus +} + +// AutoExportJobPropertiesStatus - The status of the auto export +type AutoExportJobPropertiesStatus struct { + // The operational state of auto export. InProgress indicates the export is running. Disabling indicates the user has requested + // to disable the export but the disabling is still in progress. Disabled + // indicates auto export has been disabled. DisableFailed indicates the disabling has failed. Failed means the export was + // unable to continue, due to a fatal error. + State *AutoExportStatusType + + // READ-ONLY; Files discovered for export in current iteration. It may increase while more export items are found. + CurrentIterationFilesDiscovered *int64 + + // READ-ONLY; Files that have been exported in current iteration. + CurrentIterationFilesExported *int64 + + // READ-ONLY; Files failed to export in current iteration. + CurrentIterationFilesFailed *int64 + + // READ-ONLY; Data (in MiB) discovered for export in current iteration. It may increase while more export items are found. + CurrentIterationMiBDiscovered *int64 + + // READ-ONLY; Data (in MiB) that have been exported in current iteration. + CurrentIterationMiBExported *int64 + + // READ-ONLY; Number of iterations completed since the start of the export. + ExportIterationCount *int32 + + // READ-ONLY; The time (in UTC) of the last completed auto export job. + LastCompletionTimeUTC *time.Time + + // READ-ONLY; The time (in UTC) the latest auto export job started. + LastStartedTimeUTC *time.Time + + // READ-ONLY; Time (in UTC) of the last successfully completed export iteration. Look at logging container for details. + LastSuccessfulIterationCompletionTimeUTC *time.Time + + // READ-ONLY; Server-defined status code for auto export job. + StatusCode *string + + // READ-ONLY; Server-defined status message for auto export job. + StatusMessage *string + + // READ-ONLY; Total files exported since the start of the export. This is accumulative, some files may be counted repeatedly. + TotalFilesExported *int64 + + // READ-ONLY; Total files failed to be export since the last successfully completed iteration. 
This is accumulative, some + // files may be counted repeatedly. + TotalFilesFailed *int64 + + // READ-ONLY; Total data (in MiB) exported since the start of the export. This is accumulative, some files may be counted + // repeatedly. + TotalMiBExported *int64 +} + +// AutoExportJobUpdate - An auto export job update instance. +type AutoExportJobUpdate struct { + // Resource tags. + Tags map[string]*string +} + +// AutoExportJobsListResult - Result of the request to list auto export jobs. It contains a list of auto export jobs and a +// URL link to get the next set of results. +type AutoExportJobsListResult struct { + // URL to get the next set of auto export job list results, if there are any. + NextLink *string + + // List of auto export jobs. + Value []*AutoExportJob +} + // BlobNfsTarget - Properties pertaining to the BlobNfsTarget. type BlobNfsTarget struct { // Resource ID of the storage container. @@ -726,6 +837,10 @@ type ImportJob struct { // ImportJobProperties - Properties of the import job. type ImportJobProperties struct { + // The administrative status of the import job. Possible values: 'Enable', 'Disable'. Passing in a value of 'Disable' will + // cancel the current active import job. By default it is set to 'Enable'. + AdminStatus *ImportJobAdminStatus + // How the import job will handle conflicts. For example, if the import job is trying to bring in a directory, but a file // is at that path, how it handles it. Fail indicates that the import job should // stop immediately and not do anything with the conflict. Skip indicates that it should pass over the conflict. OverwriteIfDirty @@ -757,17 +872,35 @@ type ImportJobPropertiesStatus struct { // READ-ONLY; A recent and frequently updated rate of blobs walked per second. BlobsWalkedPerSecond *int64 - // READ-ONLY; The time of the last completed archive operation + // READ-ONLY; New or modified directories that have been imported into the filesystem. + ImportedDirectories *int64 + + // READ-ONLY; New or modified files that have been imported into the filesystem. + ImportedFiles *int64 + + // READ-ONLY; Newly added symbolic links into the filesystem. + ImportedSymlinks *int64 + + // READ-ONLY; The time (in UTC) of the last completed import job. LastCompletionTime *time.Time - // READ-ONLY; The time the latest archive operation started + // READ-ONLY; The time (in UTC) the latest import job started. LastStartedTime *time.Time - // READ-ONLY; The state of the import job. InProgress indicates the import is still running. Canceled indicates it has been - // canceled by the user. Completed indicates import finished, successfully importing all - // discovered blobs into the Lustre namespace. CompletedPartial indicates the import finished but some blobs either were found - // to be conflicting and could not be imported or other errors were - // encountered. Failed means the import was unable to complete due to a fatal error. + // READ-ONLY; Directories that already exist in the filesystem and have not been modified. + PreexistingDirectories *int64 + + // READ-ONLY; Files that already exist in the filesystem and have not been modified. + PreexistingFiles *int64 + + // READ-ONLY; Symbolic links that already exist in the filesystem and have not been modified. + PreexistingSymlinks *int64 + + // READ-ONLY; The operational state of the import job. InProgress indicates the import is still running. Canceled indicates + // it has been canceled by the user. 
Completed indicates import finished, successfully + // importing all discovered blobs into the Lustre namespace. CompletedPartial indicates the import finished but some blobs + // either were found to be conflicting and could not be imported or other errors + // were encountered. Failed means the import was unable to complete due to a fatal error. State *ImportStatusType // READ-ONLY; The status message of the import job. diff --git a/sdk/resourcemanager/storagecache/armstoragecache/models_serde.go b/sdk/resourcemanager/storagecache/armstoragecache/models_serde.go index 978d572b4f14..b0c6238d4068 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/models_serde.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/models_serde.go @@ -1012,6 +1012,237 @@ func (a *AscOperationProperties) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type AutoExportJob. +func (a AutoExportJob) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", a.ID) + populate(objectMap, "location", a.Location) + populate(objectMap, "name", a.Name) + populate(objectMap, "properties", a.Properties) + populate(objectMap, "systemData", a.SystemData) + populate(objectMap, "tags", a.Tags) + populate(objectMap, "type", a.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AutoExportJob. +func (a *AutoExportJob) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &a.ID) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &a.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &a.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &a.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &a.SystemData) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &a.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &a.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AutoExportJobProperties. +func (a AutoExportJobProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "adminStatus", a.AdminStatus) + populate(objectMap, "autoExportPrefixes", a.AutoExportPrefixes) + populate(objectMap, "provisioningState", a.ProvisioningState) + populate(objectMap, "status", a.Status) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AutoExportJobProperties. 
+func (a *AutoExportJobProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "adminStatus": + err = unpopulate(val, "AdminStatus", &a.AdminStatus) + delete(rawMsg, key) + case "autoExportPrefixes": + err = unpopulate(val, "AutoExportPrefixes", &a.AutoExportPrefixes) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &a.ProvisioningState) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &a.Status) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AutoExportJobPropertiesStatus. +func (a AutoExportJobPropertiesStatus) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "currentIterationFilesDiscovered", a.CurrentIterationFilesDiscovered) + populate(objectMap, "currentIterationFilesExported", a.CurrentIterationFilesExported) + populate(objectMap, "currentIterationFilesFailed", a.CurrentIterationFilesFailed) + populate(objectMap, "currentIterationMiBDiscovered", a.CurrentIterationMiBDiscovered) + populate(objectMap, "currentIterationMiBExported", a.CurrentIterationMiBExported) + populate(objectMap, "exportIterationCount", a.ExportIterationCount) + populateDateTimeRFC3339(objectMap, "lastCompletionTimeUTC", a.LastCompletionTimeUTC) + populateDateTimeRFC3339(objectMap, "lastStartedTimeUTC", a.LastStartedTimeUTC) + populateDateTimeRFC3339(objectMap, "lastSuccessfulIterationCompletionTimeUTC", a.LastSuccessfulIterationCompletionTimeUTC) + populate(objectMap, "state", a.State) + populate(objectMap, "statusCode", a.StatusCode) + populate(objectMap, "statusMessage", a.StatusMessage) + populate(objectMap, "totalFilesExported", a.TotalFilesExported) + populate(objectMap, "totalFilesFailed", a.TotalFilesFailed) + populate(objectMap, "totalMiBExported", a.TotalMiBExported) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AutoExportJobPropertiesStatus. 
+func (a *AutoExportJobPropertiesStatus) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "currentIterationFilesDiscovered": + err = unpopulate(val, "CurrentIterationFilesDiscovered", &a.CurrentIterationFilesDiscovered) + delete(rawMsg, key) + case "currentIterationFilesExported": + err = unpopulate(val, "CurrentIterationFilesExported", &a.CurrentIterationFilesExported) + delete(rawMsg, key) + case "currentIterationFilesFailed": + err = unpopulate(val, "CurrentIterationFilesFailed", &a.CurrentIterationFilesFailed) + delete(rawMsg, key) + case "currentIterationMiBDiscovered": + err = unpopulate(val, "CurrentIterationMiBDiscovered", &a.CurrentIterationMiBDiscovered) + delete(rawMsg, key) + case "currentIterationMiBExported": + err = unpopulate(val, "CurrentIterationMiBExported", &a.CurrentIterationMiBExported) + delete(rawMsg, key) + case "exportIterationCount": + err = unpopulate(val, "ExportIterationCount", &a.ExportIterationCount) + delete(rawMsg, key) + case "lastCompletionTimeUTC": + err = unpopulateDateTimeRFC3339(val, "LastCompletionTimeUTC", &a.LastCompletionTimeUTC) + delete(rawMsg, key) + case "lastStartedTimeUTC": + err = unpopulateDateTimeRFC3339(val, "LastStartedTimeUTC", &a.LastStartedTimeUTC) + delete(rawMsg, key) + case "lastSuccessfulIterationCompletionTimeUTC": + err = unpopulateDateTimeRFC3339(val, "LastSuccessfulIterationCompletionTimeUTC", &a.LastSuccessfulIterationCompletionTimeUTC) + delete(rawMsg, key) + case "state": + err = unpopulate(val, "State", &a.State) + delete(rawMsg, key) + case "statusCode": + err = unpopulate(val, "StatusCode", &a.StatusCode) + delete(rawMsg, key) + case "statusMessage": + err = unpopulate(val, "StatusMessage", &a.StatusMessage) + delete(rawMsg, key) + case "totalFilesExported": + err = unpopulate(val, "TotalFilesExported", &a.TotalFilesExported) + delete(rawMsg, key) + case "totalFilesFailed": + err = unpopulate(val, "TotalFilesFailed", &a.TotalFilesFailed) + delete(rawMsg, key) + case "totalMiBExported": + err = unpopulate(val, "TotalMiBExported", &a.TotalMiBExported) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AutoExportJobUpdate. +func (a AutoExportJobUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "tags", a.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AutoExportJobUpdate. +func (a *AutoExportJobUpdate) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "tags": + err = unpopulate(val, "Tags", &a.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AutoExportJobsListResult. 
+func (a AutoExportJobsListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", a.NextLink) + populate(objectMap, "value", a.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AutoExportJobsListResult. +func (a *AutoExportJobsListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &a.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &a.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type BlobNfsTarget. func (b BlobNfsTarget) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1823,6 +2054,7 @@ func (i *ImportJob) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type ImportJobProperties. func (i ImportJobProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "adminStatus", i.AdminStatus) populate(objectMap, "conflictResolutionMode", i.ConflictResolutionMode) populate(objectMap, "importPrefixes", i.ImportPrefixes) populate(objectMap, "maximumErrors", i.MaximumErrors) @@ -1840,6 +2072,9 @@ func (i *ImportJobProperties) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "adminStatus": + err = unpopulate(val, "AdminStatus", &i.AdminStatus) + delete(rawMsg, key) case "conflictResolutionMode": err = unpopulate(val, "ConflictResolutionMode", &i.ConflictResolutionMode) delete(rawMsg, key) @@ -1868,8 +2103,14 @@ func (i ImportJobPropertiesStatus) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "blobsImportedPerSecond", i.BlobsImportedPerSecond) populate(objectMap, "blobsWalkedPerSecond", i.BlobsWalkedPerSecond) + populate(objectMap, "importedDirectories", i.ImportedDirectories) + populate(objectMap, "importedFiles", i.ImportedFiles) + populate(objectMap, "importedSymlinks", i.ImportedSymlinks) populateDateTimeRFC3339(objectMap, "lastCompletionTime", i.LastCompletionTime) populateDateTimeRFC3339(objectMap, "lastStartedTime", i.LastStartedTime) + populate(objectMap, "preexistingDirectories", i.PreexistingDirectories) + populate(objectMap, "preexistingFiles", i.PreexistingFiles) + populate(objectMap, "preexistingSymlinks", i.PreexistingSymlinks) populate(objectMap, "state", i.State) populate(objectMap, "statusMessage", i.StatusMessage) populate(objectMap, "totalBlobsImported", i.TotalBlobsImported) @@ -1894,12 +2135,30 @@ func (i *ImportJobPropertiesStatus) UnmarshalJSON(data []byte) error { case "blobsWalkedPerSecond": err = unpopulate(val, "BlobsWalkedPerSecond", &i.BlobsWalkedPerSecond) delete(rawMsg, key) + case "importedDirectories": + err = unpopulate(val, "ImportedDirectories", &i.ImportedDirectories) + delete(rawMsg, key) + case "importedFiles": + err = unpopulate(val, "ImportedFiles", &i.ImportedFiles) + delete(rawMsg, key) + case "importedSymlinks": + err = unpopulate(val, "ImportedSymlinks", &i.ImportedSymlinks) + delete(rawMsg, key) case "lastCompletionTime": err = unpopulateDateTimeRFC3339(val, "LastCompletionTime", &i.LastCompletionTime) 
delete(rawMsg, key) case "lastStartedTime": err = unpopulateDateTimeRFC3339(val, "LastStartedTime", &i.LastStartedTime) delete(rawMsg, key) + case "preexistingDirectories": + err = unpopulate(val, "PreexistingDirectories", &i.PreexistingDirectories) + delete(rawMsg, key) + case "preexistingFiles": + err = unpopulate(val, "PreexistingFiles", &i.PreexistingFiles) + delete(rawMsg, key) + case "preexistingSymlinks": + err = unpopulate(val, "PreexistingSymlinks", &i.PreexistingSymlinks) + delete(rawMsg, key) case "state": err = unpopulate(val, "State", &i.State) delete(rawMsg, key) diff --git a/sdk/resourcemanager/storagecache/armstoragecache/operations_client.go b/sdk/resourcemanager/storagecache/armstoragecache/operations_client.go index 46390dcfeb6c..6bb3379e9d2c 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/operations_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/operations_client.go @@ -39,7 +39,7 @@ func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientO // NewListPager - Lists all of the available Resource Provider operations. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ @@ -72,7 +72,7 @@ func (client *OperationsClient) listCreateRequest(ctx context.Context, options * return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/storagecache/armstoragecache/operations_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/operations_client_example_test.go deleted file mode 100644 index 7d743638a789..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/operations_client_example_test.go +++ /dev/null @@ -1,102 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armstoragecache_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Operations_List.json -func ExampleOperationsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewOperationsClient().NewListPager(nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.APIOperationListResult = armstoragecache.APIOperationListResult{ - // Value: []*armstoragecache.APIOperation{ - // { - // Name: to.Ptr("Microsoft.StorageCache/caches/write"), - // Display: &armstoragecache.APIOperationDisplay{ - // Operation: to.Ptr("Create or Update Cache"), - // Provider: to.Ptr("Azure Storage Cache"), - // Resource: to.Ptr("Caches"), - // }, - // }, - // { - // Name: to.Ptr("Microsoft.StorageCache/caches/delete"), - // Display: &armstoragecache.APIOperationDisplay{ - // Operation: to.Ptr("Delete Cache"), - // Provider: to.Ptr("Azure Storage Cache"), - // Resource: to.Ptr("Caches"), - // }, - // }, - // { - // Name: to.Ptr("Microsoft.StorageCache/caches/providers/Microsoft.Insights/metricDefinitions/read"), - // Display: &armstoragecache.APIOperationDisplay{ - // Description: to.Ptr("Reads Cache Metric Definitions."), - // Operation: to.Ptr("Get Cache Metric Definitions"), - // Provider: to.Ptr("Microsoft Azure HPC Cache"), - // Resource: to.Ptr("StorageCache Metric Definitions"), - // }, - // IsDataAction: to.Ptr(false), - // Origin: to.Ptr("system"), - // Properties: &armstoragecache.APIOperationProperties{ - // ServiceSpecification: &armstoragecache.APIOperationPropertiesServiceSpecification{ - // MetricSpecifications: []*armstoragecache.MetricSpecification{ - // { - // Name: to.Ptr("ClientIOPS"), - // AggregationType: to.Ptr("Average"), - // DisplayDescription: to.Ptr("The rate of client file operations processed by the Cache."), - // DisplayName: to.Ptr("Total Client IOPS"), - // MetricClass: to.Ptr("Transactions"), - // SupportedAggregationTypes: []*armstoragecache.MetricAggregationType{ - // to.Ptr(armstoragecache.MetricAggregationTypeMinimum), - // to.Ptr(armstoragecache.MetricAggregationTypeMaximum), - // to.Ptr(armstoragecache.MetricAggregationTypeAverage)}, - // Unit: to.Ptr("Count"), - // }, - // { - // Name: to.Ptr("ClientLatency"), - // AggregationType: to.Ptr("Average"), - // DisplayDescription: to.Ptr("Average latency of client file operations to the Cache."), - // DisplayName: to.Ptr("Average Client Latency"), - // MetricClass: to.Ptr("Latency"), - // SupportedAggregationTypes: []*armstoragecache.MetricAggregationType{ - // 
to.Ptr(armstoragecache.MetricAggregationTypeMinimum), - // to.Ptr(armstoragecache.MetricAggregationTypeMaximum), - // to.Ptr(armstoragecache.MetricAggregationTypeAverage)}, - // Unit: to.Ptr("Milliseconds"), - // }}, - // }, - // }, - // }}, - // } - } -} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/options.go b/sdk/resourcemanager/storagecache/armstoragecache/options.go index 282ab5b6c8c1..821f557df195 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/options.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/options.go @@ -64,6 +64,36 @@ type AscUsagesClientListOptions struct { // placeholder for future optional parameters } +// AutoExportJobsClientBeginCreateOrUpdateOptions contains the optional parameters for the AutoExportJobsClient.BeginCreateOrUpdate +// method. +type AutoExportJobsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// AutoExportJobsClientBeginDeleteOptions contains the optional parameters for the AutoExportJobsClient.BeginDelete method. +type AutoExportJobsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// AutoExportJobsClientBeginUpdateOptions contains the optional parameters for the AutoExportJobsClient.BeginUpdate method. +type AutoExportJobsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// AutoExportJobsClientGetOptions contains the optional parameters for the AutoExportJobsClient.Get method. +type AutoExportJobsClientGetOptions struct { + // placeholder for future optional parameters +} + +// AutoExportJobsClientListByAmlFilesystemOptions contains the optional parameters for the AutoExportJobsClient.NewListByAmlFilesystemPager +// method. +type AutoExportJobsClientListByAmlFilesystemOptions struct { + // placeholder for future optional parameters +} + // CachesClientBeginCreateOrUpdateOptions contains the optional parameters for the CachesClient.BeginCreateOrUpdate method. type CachesClientBeginCreateOrUpdateOptions struct { // Resumes the LRO from the provided token. diff --git a/sdk/resourcemanager/storagecache/armstoragecache/responses.go b/sdk/resourcemanager/storagecache/armstoragecache/responses.go index 89017c28c802..18c9fa6e8f25 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/responses.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/responses.go @@ -68,6 +68,36 @@ type AscUsagesClientListResponse struct { ResourceUsagesListResult } +// AutoExportJobsClientCreateOrUpdateResponse contains the response from method AutoExportJobsClient.BeginCreateOrUpdate. +type AutoExportJobsClientCreateOrUpdateResponse struct { + // An auto export job instance. Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md + AutoExportJob +} + +// AutoExportJobsClientDeleteResponse contains the response from method AutoExportJobsClient.BeginDelete. +type AutoExportJobsClientDeleteResponse struct { + // placeholder for future response values +} + +// AutoExportJobsClientGetResponse contains the response from method AutoExportJobsClient.Get. +type AutoExportJobsClientGetResponse struct { + // An auto export job instance. 
Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md + AutoExportJob +} + +// AutoExportJobsClientListByAmlFilesystemResponse contains the response from method AutoExportJobsClient.NewListByAmlFilesystemPager. +type AutoExportJobsClientListByAmlFilesystemResponse struct { + // Result of the request to list auto export jobs. It contains a list of auto export jobs and a URL link to get the next set + // of results. + AutoExportJobsListResult +} + +// AutoExportJobsClientUpdateResponse contains the response from method AutoExportJobsClient.BeginUpdate. +type AutoExportJobsClientUpdateResponse struct { + // An auto export job instance. Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md + AutoExportJob +} + // CachesClientCreateOrUpdateResponse contains the response from method CachesClient.BeginCreateOrUpdate. type CachesClientCreateOrUpdateResponse struct { // A cache instance. Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md diff --git a/sdk/resourcemanager/storagecache/armstoragecache/skus_client.go b/sdk/resourcemanager/storagecache/armstoragecache/skus_client.go index 69d19223f0cd..c727c1bd5562 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/skus_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/skus_client.go @@ -45,7 +45,7 @@ func NewSKUsClient(subscriptionID string, credential azcore.TokenCredential, opt // NewListPager - Get the list of StorageCache.Cache SKUs available to this subscription. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - options - SKUsClientListOptions contains the optional parameters for the SKUsClient.NewListPager method. func (client *SKUsClient) NewListPager(options *SKUsClientListOptions) *runtime.Pager[SKUsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[SKUsClientListResponse]{ @@ -82,7 +82,7 @@ func (client *SKUsClient) listCreateRequest(ctx context.Context, options *SKUsCl return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/storagecache/armstoragecache/skus_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/skus_client_example_test.go deleted file mode 100644 index 3f4321dc0d5c..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/skus_client_example_test.go +++ /dev/null @@ -1,115 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armstoragecache_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/Skus_List.json -func ExampleSKUsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewSKUsClient().NewListPager(nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.ResourceSKUsResult = armstoragecache.ResourceSKUsResult{ - // Value: []*armstoragecache.ResourceSKU{ - // { - // Name: to.Ptr("Standard_2G"), - // Capabilities: []*armstoragecache.ResourceSKUCapabilities{ - // { - // Name: to.Ptr("throughput GB/s"), - // Value: to.Ptr("2"), - // }, - // { - // Name: to.Ptr("cache sizes(GB)"), - // Value: to.Ptr("3072,6144,12288"), - // }}, - // LocationInfo: []*armstoragecache.ResourceSKULocationInfo{ - // { - // Location: to.Ptr("eastus"), - // Zones: []*string{ - // }, - // }}, - // Locations: []*string{ - // to.Ptr("eastus")}, - // ResourceType: to.Ptr("caches"), - // Restrictions: []*armstoragecache.Restriction{ - // }, - // }, - // { - // Name: to.Ptr("Standard_4G"), - // Capabilities: []*armstoragecache.ResourceSKUCapabilities{ - // { - // Name: to.Ptr("throughput GB/s"), - // Value: to.Ptr("4"), - // }, - // { - // Name: to.Ptr("cache sizes(GB)"), - // Value: to.Ptr("6144,12288,24576"), - // }}, - // LocationInfo: []*armstoragecache.ResourceSKULocationInfo{ - // { - // Location: to.Ptr("eastus"), - // Zones: []*string{ - // }, - // }}, - // Locations: []*string{ - // to.Ptr("eastus")}, - // ResourceType: to.Ptr("caches"), - // Restrictions: []*armstoragecache.Restriction{ - // }, - // }, - // { - // Name: to.Ptr("Standard_8G"), - // Capabilities: []*armstoragecache.ResourceSKUCapabilities{ - // { - // Name: to.Ptr("throughput GB/s"), - // Value: to.Ptr("8"), - // }, - // { - // Name: to.Ptr("cache sizes(GB)"), - // Value: to.Ptr("12288,24576,49152"), - // }}, - // LocationInfo: []*armstoragecache.ResourceSKULocationInfo{ - // { - // Location: to.Ptr("eastus"), - // Zones: []*string{ - // }, - // }}, - // Locations: []*string{ - // to.Ptr("eastus")}, - // ResourceType: to.Ptr("caches"), - // Restrictions: []*armstoragecache.Restriction{ - // }, - // }}, - // } - } -} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/storagetarget_client.go b/sdk/resourcemanager/storagecache/armstoragecache/storagetarget_client.go index c3ead32efb5e..323aed563887 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/storagetarget_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/storagetarget_client.go @@ -47,7 +47,7 @@ func 
NewStorageTargetClient(subscriptionID string, credential azcore.TokenCreden // target's namespace will return errors until the flush operation completes. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - storageTargetName - Name of Storage Target. @@ -75,7 +75,7 @@ func (client *StorageTargetClient) BeginFlush(ctx context.Context, resourceGroup // target's namespace will return errors until the flush operation completes. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *StorageTargetClient) flush(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, options *StorageTargetClientBeginFlushOptions) (*http.Response, error) { var err error const operationName = "StorageTargetClient.BeginFlush" @@ -121,7 +121,7 @@ func (client *StorageTargetClient) flushCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -131,7 +131,7 @@ func (client *StorageTargetClient) flushCreateRequest(ctx context.Context, resou // end on the next request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - storageTargetName - Name of Storage Target. @@ -159,7 +159,7 @@ func (client *StorageTargetClient) BeginInvalidate(ctx context.Context, resource // on the next request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *StorageTargetClient) invalidate(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, options *StorageTargetClientBeginInvalidateOptions) (*http.Response, error) { var err error const operationName = "StorageTargetClient.BeginInvalidate" @@ -205,7 +205,7 @@ func (client *StorageTargetClient) invalidateCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -214,7 +214,7 @@ func (client *StorageTargetClient) invalidateCreateRequest(ctx context.Context, // BeginResume - Resumes client access to a previously suspended storage target. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. 
// - storageTargetName - Name of Storage Target. @@ -241,7 +241,7 @@ func (client *StorageTargetClient) BeginResume(ctx context.Context, resourceGrou // Resume - Resumes client access to a previously suspended storage target. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *StorageTargetClient) resume(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, options *StorageTargetClientBeginResumeOptions) (*http.Response, error) { var err error const operationName = "StorageTargetClient.BeginResume" @@ -287,7 +287,7 @@ func (client *StorageTargetClient) resumeCreateRequest(ctx context.Context, reso return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -296,7 +296,7 @@ func (client *StorageTargetClient) resumeCreateRequest(ctx context.Context, reso // BeginSuspend - Suspends client access to a storage target. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - storageTargetName - Name of Storage Target. @@ -323,7 +323,7 @@ func (client *StorageTargetClient) BeginSuspend(ctx context.Context, resourceGro // Suspend - Suspends client access to a storage target. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *StorageTargetClient) suspend(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, options *StorageTargetClientBeginSuspendOptions) (*http.Response, error) { var err error const operationName = "StorageTargetClient.BeginSuspend" @@ -369,7 +369,7 @@ func (client *StorageTargetClient) suspendCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/storagecache/armstoragecache/storagetarget_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/storagetarget_client_example_test.go deleted file mode 100644 index 350d27846215..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/storagetarget_client_example_test.go +++ /dev/null @@ -1,102 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armstoragecache_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_Flush.json -func ExampleStorageTargetClient_BeginFlush() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewStorageTargetClient().BeginFlush(ctx, "scgroup", "sc", "st1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_Suspend.json -func ExampleStorageTargetClient_BeginSuspend() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewStorageTargetClient().BeginSuspend(ctx, "scgroup", "sc", "st1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_Resume.json -func ExampleStorageTargetClient_BeginResume() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewStorageTargetClient().BeginResume(ctx, "scgroup", "sc", "st1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_Invalidate.json -func ExampleStorageTargetClient_BeginInvalidate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - 
poller, err := clientFactory.NewStorageTargetClient().BeginInvalidate(ctx, "scgroup", "sc", "st1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/storagetargets_client.go b/sdk/resourcemanager/storagecache/armstoragecache/storagetargets_client.go index 5be532658fc4..f82c95695d4b 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/storagetargets_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/storagetargets_client.go @@ -48,7 +48,7 @@ func NewStorageTargetsClient(subscriptionID string, credential azcore.TokenCrede // is healthy again. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - storageTargetName - Name of Storage Target. @@ -77,7 +77,7 @@ func (client *StorageTargetsClient) BeginCreateOrUpdate(ctx context.Context, res // is healthy again. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *StorageTargetsClient) createOrUpdate(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, storagetarget StorageTarget, options *StorageTargetsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "StorageTargetsClient.BeginCreateOrUpdate" @@ -123,7 +123,7 @@ func (client *StorageTargetsClient) createOrUpdateCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, storagetarget); err != nil { @@ -135,7 +135,7 @@ func (client *StorageTargetsClient) createOrUpdateCreateRequest(ctx context.Cont // BeginDNSRefresh - Tells a storage target to refresh its DNS information. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - storageTargetName - Name of Storage Target. @@ -162,7 +162,7 @@ func (client *StorageTargetsClient) BeginDNSRefresh(ctx context.Context, resourc // DNSRefresh - Tells a storage target to refresh its DNS information. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *StorageTargetsClient) dNSRefresh(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, options *StorageTargetsClientBeginDNSRefreshOptions) (*http.Response, error) { var err error const operationName = "StorageTargetsClient.BeginDNSRefresh" @@ -208,7 +208,7 @@ func (client *StorageTargetsClient) dnsRefreshCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -220,7 +220,7 @@ func (client *StorageTargetsClient) dnsRefreshCreateRequest(ctx context.Context, // will be deleted. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - storageTargetName - Name of Storage Target. @@ -249,7 +249,7 @@ func (client *StorageTargetsClient) BeginDelete(ctx context.Context, resourceGro // will be deleted. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *StorageTargetsClient) deleteOperation(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, options *StorageTargetsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "StorageTargetsClient.BeginDelete" @@ -295,7 +295,7 @@ func (client *StorageTargetsClient) deleteCreateRequest(ctx context.Context, res return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") if options != nil && options.Force != nil { reqQP.Set("force", *options.Force) } @@ -307,7 +307,7 @@ func (client *StorageTargetsClient) deleteCreateRequest(ctx context.Context, res // Get - Returns a Storage Target from a cache. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - storageTargetName - Name of Storage Target. @@ -358,7 +358,7 @@ func (client *StorageTargetsClient) getCreateRequest(ctx context.Context, resour return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -375,7 +375,7 @@ func (client *StorageTargetsClient) getHandleResponse(resp *http.Response) (Stor // NewListByCachePager - Returns a list of Storage Targets for the specified cache. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. 
// - options - StorageTargetsClientListByCacheOptions contains the optional parameters for the StorageTargetsClient.NewListByCachePager @@ -423,7 +423,7 @@ func (client *StorageTargetsClient) listByCacheCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -441,7 +441,7 @@ func (client *StorageTargetsClient) listByCacheHandleResponse(resp *http.Respons // BeginRestoreDefaults - Tells a storage target to restore its settings to their default values. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - resourceGroupName - The name of the resource group. The name is case insensitive. // - cacheName - Name of cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class. // - storageTargetName - Name of Storage Target. @@ -468,7 +468,7 @@ func (client *StorageTargetsClient) BeginRestoreDefaults(ctx context.Context, re // RestoreDefaults - Tells a storage target to restore its settings to their default values. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 func (client *StorageTargetsClient) restoreDefaults(ctx context.Context, resourceGroupName string, cacheName string, storageTargetName string, options *StorageTargetsClientBeginRestoreDefaultsOptions) (*http.Response, error) { var err error const operationName = "StorageTargetsClient.BeginRestoreDefaults" @@ -514,7 +514,7 @@ func (client *StorageTargetsClient) restoreDefaultsCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/storagecache/armstoragecache/storagetargets_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/storagetargets_client_example_test.go deleted file mode 100644 index 6cc9fb28f304..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/storagetargets_client_example_test.go +++ /dev/null @@ -1,459 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
- -package armstoragecache_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_DnsRefresh.json -func ExampleStorageTargetsClient_BeginDNSRefresh() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewStorageTargetsClient().BeginDNSRefresh(ctx, "scgroup", "sc", "st1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_ListByCache.json -func ExampleStorageTargetsClient_NewListByCachePager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewStorageTargetsClient().NewListByCachePager("scgroup", "sc1", nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // page.StorageTargetsResult = armstoragecache.StorageTargetsResult{ - // Value: []*armstoragecache.StorageTarget{ - // { - // Name: to.Ptr("st1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache/StorageTarget"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1/storagetargets/st1"), - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Properties: &armstoragecache.StorageTargetProperties{ - // AllocationPercentage: to.Ptr[int32](25), - // Junctions: []*armstoragecache.NamespaceJunction{ - // { - // NamespacePath: to.Ptr("/path/on/cache"), - // NfsAccessPolicy: to.Ptr("default"), - // NfsExport: to.Ptr("exp1"), - // TargetPath: to.Ptr("/path/on/exp1"), - // }, - // { - // NamespacePath: to.Ptr("/path2/on/cache"), - // NfsAccessPolicy: to.Ptr("default"), - // NfsExport: to.Ptr("exp2"), - // TargetPath: to.Ptr("/path2/on/exp2"), - // }}, - // Nfs3: &armstoragecache.Nfs3Target{ - // Target: to.Ptr("10.0.44.44"), - // UsageModel: to.Ptr("READ_ONLY"), - // }, - // State: to.Ptr(armstoragecache.OperationalStateTypeReady), - // TargetType: to.Ptr(armstoragecache.StorageTargetTypeNfs3), - // }, - // }, - // { - // Name: to.Ptr("st2"), - // Type: to.Ptr("Microsoft.StorageCache/Cache/StorageTarget"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1/storagetargets/st2"), - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Properties: &armstoragecache.StorageTargetProperties{ - // AllocationPercentage: to.Ptr[int32](50), - // Clfs: &armstoragecache.ClfsTarget{ - // Target: to.Ptr("https://contoso123.blob.core.windows.net/contoso123"), - // }, - // Junctions: []*armstoragecache.NamespaceJunction{ - // { - // NamespacePath: to.Ptr("/some/arbitrary/place/on/cache"), - // NfsAccessPolicy: to.Ptr("default"), - // TargetPath: to.Ptr("/"), - // }}, - // State: to.Ptr(armstoragecache.OperationalStateTypeReady), - // TargetType: to.Ptr(armstoragecache.StorageTargetTypeClfs), - // }, - // }, - // { - // Name: to.Ptr("st3"), - // Type: to.Ptr("Microsoft.StorageCache/Cache/StorageTarget"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1/storagetargets/st3"), - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() 
time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Properties: &armstoragecache.StorageTargetProperties{ - // AllocationPercentage: to.Ptr[int32](25), - // Junctions: []*armstoragecache.NamespaceJunction{ - // { - // NamespacePath: to.Ptr("/some/crazier/place/on/cache"), - // NfsAccessPolicy: to.Ptr("default"), - // NfsExport: to.Ptr(""), - // TargetPath: to.Ptr("/"), - // }}, - // State: to.Ptr(armstoragecache.OperationalStateTypeReady), - // TargetType: to.Ptr(armstoragecache.StorageTargetTypeUnknown), - // Unknown: &armstoragecache.UnknownTarget{ - // Attributes: map[string]*string{ - // "foo": to.Ptr("bar"), - // "foo2": to.Ptr("test"), - // }, - // }, - // }, - // }}, - // } - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_Delete.json -func ExampleStorageTargetsClient_BeginDelete() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewStorageTargetsClient().BeginDelete(ctx, "scgroup", "sc1", "st1", &armstoragecache.StorageTargetsClientBeginDeleteOptions{Force: nil}) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_Get.json -func ExampleStorageTargetsClient_Get() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - res, err := clientFactory.NewStorageTargetsClient().Get(ctx, "scgroup", "sc1", "st1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.StorageTarget = armstoragecache.StorageTarget{ - // Name: to.Ptr("st1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache/StorageTarget"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1/storagetargets/st1"), - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Properties: &armstoragecache.StorageTargetProperties{ - // AllocationPercentage: to.Ptr[int32](25), - // Junctions: []*armstoragecache.NamespaceJunction{ - // { - // NamespacePath: to.Ptr("/path/on/cache"), - // NfsAccessPolicy: to.Ptr("default"), - // NfsExport: to.Ptr("exp1"), - // TargetPath: to.Ptr("/path/on/exp1"), - // }, - // { - // NamespacePath: to.Ptr("/path2/on/cache"), - // NfsAccessPolicy: to.Ptr("default"), - // NfsExport: to.Ptr("exp2"), - // TargetPath: to.Ptr("/path2/on/exp2"), - // }}, - // Nfs3: &armstoragecache.Nfs3Target{ - // Target: to.Ptr("10.0.44.44"), - // UsageModel: to.Ptr("READ_HEAVY_FREQ"), - // }, - // State: to.Ptr(armstoragecache.OperationalStateTypeReady), - // TargetType: to.Ptr(armstoragecache.StorageTargetTypeNfs3), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_CreateOrUpdate.json -func ExampleStorageTargetsClient_BeginCreateOrUpdate_storageTargetsCreateOrUpdate() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewStorageTargetsClient().BeginCreateOrUpdate(ctx, "scgroup", "sc1", "st1", armstoragecache.StorageTarget{ - Properties: &armstoragecache.StorageTargetProperties{ - Junctions: []*armstoragecache.NamespaceJunction{ - { - NamespacePath: to.Ptr("/path/on/cache"), - NfsAccessPolicy: to.Ptr("default"), - NfsExport: to.Ptr("exp1"), - TargetPath: to.Ptr("/path/on/exp1"), - }, - { - NamespacePath: to.Ptr("/path2/on/cache"), - NfsAccessPolicy: to.Ptr("rootSquash"), - NfsExport: to.Ptr("exp2"), - TargetPath: to.Ptr("/path2/on/exp2"), - }}, - Nfs3: &armstoragecache.Nfs3Target{ - Target: to.Ptr("10.0.44.44"), - UsageModel: to.Ptr("READ_ONLY"), - VerificationTimer: to.Ptr[int32](30), - }, - TargetType: to.Ptr(armstoragecache.StorageTargetTypeNfs3), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.StorageTarget = armstoragecache.StorageTarget{ - // Name: to.Ptr("st1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache/StorageTarget"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1/storagetargets/st1"), - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Properties: &armstoragecache.StorageTargetProperties{ - // Junctions: []*armstoragecache.NamespaceJunction{ - // { - // NamespacePath: to.Ptr("/path/on/cache"), - // NfsAccessPolicy: to.Ptr("default"), - // NfsExport: to.Ptr("exp1"), - // TargetPath: to.Ptr("/path/on/exp1"), - // }, - // { - // NamespacePath: to.Ptr("/path2/on/cache"), - // NfsAccessPolicy: to.Ptr("rootSquash"), - // NfsExport: to.Ptr("exp2"), - // TargetPath: to.Ptr("/path2/on/exp2"), - // }}, - // Nfs3: &armstoragecache.Nfs3Target{ - // Target: to.Ptr("10.0.44.44"), - // UsageModel: to.Ptr("READ_ONLY"), - // VerificationTimer: to.Ptr[int32](30), - // }, - // State: to.Ptr(armstoragecache.OperationalStateTypeReady), - // TargetType: to.Ptr(armstoragecache.StorageTargetTypeNfs3), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_CreateOrUpdate_BlobNfs.json -func ExampleStorageTargetsClient_BeginCreateOrUpdate_storageTargetsCreateOrUpdateBlobNfs() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewStorageTargetsClient().BeginCreateOrUpdate(ctx, "scgroup", "sc1", "st1", armstoragecache.StorageTarget{ - Properties: &armstoragecache.StorageTargetProperties{ - BlobNfs: &armstoragecache.BlobNfsTarget{ - Target: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/blofnfs/blobServices/default/containers/blobnfs"), - UsageModel: to.Ptr("READ_WRITE"), - VerificationTimer: to.Ptr[int32](28800), - WriteBackTimer: to.Ptr[int32](3600), - }, - Junctions: []*armstoragecache.NamespaceJunction{ - { - NamespacePath: to.Ptr("/blobnfs"), - }}, - TargetType: to.Ptr(armstoragecache.StorageTargetTypeBlobNfs), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.StorageTarget = armstoragecache.StorageTarget{ - // Name: to.Ptr("st1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache/StorageTarget"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1/storagetargets/st1"), - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Properties: &armstoragecache.StorageTargetProperties{ - // BlobNfs: &armstoragecache.BlobNfsTarget{ - // Target: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Storage/storageAccounts/blofnfs/blobServices/default/containers/blobnfs"), - // UsageModel: to.Ptr("READ_WRITE"), - // VerificationTimer: to.Ptr[int32](28800), - // WriteBackTimer: to.Ptr[int32](3600), - // }, - // Junctions: []*armstoragecache.NamespaceJunction{ - // { - // NamespacePath: to.Ptr("/blobnfs"), - // }}, - // State: to.Ptr(armstoragecache.OperationalStateTypeReady), - // TargetType: to.Ptr(armstoragecache.StorageTargetTypeBlobNfs), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_CreateOrUpdate_NoJunctions.json -func ExampleStorageTargetsClient_BeginCreateOrUpdate_storageTargetsCreateOrUpdateNoJunctions() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewStorageTargetsClient().BeginCreateOrUpdate(ctx, "scgroup", "sc1", "st1", armstoragecache.StorageTarget{ - Properties: &armstoragecache.StorageTargetProperties{ - Nfs3: &armstoragecache.Nfs3Target{ - Target: to.Ptr("10.0.44.44"), - UsageModel: to.Ptr("READ_ONLY"), - VerificationTimer: to.Ptr[int32](30), - }, - TargetType: to.Ptr(armstoragecache.StorageTargetTypeNfs3), - }, - }, nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - res, err := poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } - // You could use response here. We use blank identifier for just demo purposes. - _ = res - // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
- // res.StorageTarget = armstoragecache.StorageTarget{ - // Name: to.Ptr("st1"), - // Type: to.Ptr("Microsoft.StorageCache/Cache/StorageTarget"), - // ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.StorageCache/caches/sc1/storagetargets/st1"), - // SystemData: &armstoragecache.SystemData{ - // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-01T17:18:19.123Z"); return t}()), - // CreatedBy: to.Ptr("user1"), - // CreatedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-01-02T17:18:19.123Z"); return t}()), - // LastModifiedBy: to.Ptr("user2"), - // LastModifiedByType: to.Ptr(armstoragecache.CreatedByTypeUser), - // }, - // Properties: &armstoragecache.StorageTargetProperties{ - // Nfs3: &armstoragecache.Nfs3Target{ - // Target: to.Ptr("10.0.44.44"), - // UsageModel: to.Ptr("READ_ONLY"), - // VerificationTimer: to.Ptr[int32](30), - // }, - // State: to.Ptr(armstoragecache.OperationalStateTypeReady), - // TargetType: to.Ptr(armstoragecache.StorageTargetTypeNfs3), - // }, - // } -} - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/StorageTargets_RestoreDefaults.json -func ExampleStorageTargetsClient_BeginRestoreDefaults() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - poller, err := clientFactory.NewStorageTargetsClient().BeginRestoreDefaults(ctx, "scgroup", "sc", "st1", nil) - if err != nil { - log.Fatalf("failed to finish the request: %v", err) - } - _, err = poller.PollUntilDone(ctx, nil) - if err != nil { - log.Fatalf("failed to pull the result: %v", err) - } -} diff --git a/sdk/resourcemanager/storagecache/armstoragecache/usagemodels_client.go b/sdk/resourcemanager/storagecache/armstoragecache/usagemodels_client.go index c3909f2579c6..42e045b81acf 100644 --- a/sdk/resourcemanager/storagecache/armstoragecache/usagemodels_client.go +++ b/sdk/resourcemanager/storagecache/armstoragecache/usagemodels_client.go @@ -45,7 +45,7 @@ func NewUsageModelsClient(subscriptionID string, credential azcore.TokenCredenti // NewListPager - Get the list of cache usage models available to this subscription. // -// Generated from API version 2024-03-01 +// Generated from API version 2024-07-01 // - options - UsageModelsClientListOptions contains the optional parameters for the UsageModelsClient.NewListPager method. 
func (client *UsageModelsClient) NewListPager(options *UsageModelsClientListOptions) *runtime.Pager[UsageModelsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[UsageModelsClientListResponse]{ @@ -82,7 +82,7 @@ func (client *UsageModelsClient) listCreateRequest(ctx context.Context, options return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2024-03-01") + reqQP.Set("api-version", "2024-07-01") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/storagecache/armstoragecache/usagemodels_client_example_test.go b/sdk/resourcemanager/storagecache/armstoragecache/usagemodels_client_example_test.go deleted file mode 100644 index 51dd0f421797..000000000000 --- a/sdk/resourcemanager/storagecache/armstoragecache/usagemodels_client_example_test.go +++ /dev/null @@ -1,60 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package armstoragecache_test - -import ( - "context" - "log" - - "github.com/Azure/azure-sdk-for-go/sdk/azidentity" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storagecache/armstoragecache/v4" -) - -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/33c4457b1d13f83965f4fe3367dca4a6df898100/specification/storagecache/resource-manager/Microsoft.StorageCache/stable/2024-03-01/examples/UsageModels_List.json -func ExampleUsageModelsClient_NewListPager() { - cred, err := azidentity.NewDefaultAzureCredential(nil) - if err != nil { - log.Fatalf("failed to obtain a credential: %v", err) - } - ctx := context.Background() - clientFactory, err := armstoragecache.NewClientFactory("", cred, nil) - if err != nil { - log.Fatalf("failed to create client: %v", err) - } - pager := clientFactory.NewUsageModelsClient().NewListPager(nil) - for pager.More() { - page, err := pager.NextPage(ctx) - if err != nil { - log.Fatalf("failed to advance page: %v", err) - } - for _, v := range page.Value { - // You could use page here. We use blank identifier for just demo purposes. - _ = v - } - // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. - // page.UsageModelsResult = armstoragecache.UsageModelsResult{ - // Value: []*armstoragecache.UsageModel{ - // { - // Display: &armstoragecache.UsageModelDisplay{ - // Description: to.Ptr("Read only, with a default verification timer of 30 seconds. Verification timer has a minimum value of 1 and maximum value of 31536000. Write-back timer must have value of 0 or be null."), - // }, - // ModelName: to.Ptr("READ_ONLY"), - // TargetType: to.Ptr("nfs3"), - // }, - // { - // Display: &armstoragecache.UsageModelDisplay{ - // Description: to.Ptr("Read-write, with a default verification timer of 8 hours and default write-back timer of 1 hour. Verification timer and write-back timer have a minimum value of 1 and maximum value of 31536000."), - // }, - // ModelName: to.Ptr("READ_WRITE"), - // TargetType: to.Ptr("nfs3"), - // }}, - // } - } -}